1 use crate::session::{self, DataTypeKind};
2 use crate::ty::{self, Ty, TyCtxt, TypeFoldable, ReprOptions};
4 use syntax::ast::{self, Ident, IntTy, UintTy};
6 use syntax_pos::DUMMY_SP;
16 use crate::ich::StableHashingContext;
17 use rustc_data_structures::indexed_vec::{IndexVec, Idx};
18 use rustc_data_structures::stable_hasher::{HashStable, StableHasher,
21 pub use rustc_target::abi::*;
22 use rustc_target::spec::{HasTargetSpec, abi::Abi as SpecAbi};
23 use rustc_target::abi::call::{
24 ArgAttribute, ArgAttributes, ArgType, Conv, FnType, IgnoreMode, PassMode, Reg, RegKind
// Extension trait adding rustc-specific helpers to `rustc_target::abi::Integer`.
29 pub trait IntegerExt {
// Converts this integer width to the corresponding Rust primitive type
// (`u8`..`u128` or `i8`..`i128`, selected by `signed`).
30 fn to_ty<'a, 'tcx>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, signed: bool) -> Ty<'tcx>;
// Maps an `attr::IntType` (as parsed from a `#[repr(..)]` attribute) to an `Integer`.
31 fn from_attr<C: HasDataLayout>(cx: &C, ity: attr::IntType) -> Integer;
// NOTE(review): the remaining parameters of `repr_discr` and the trait's
// closing brace are elided in this excerpt; see the impl below for the
// full semantics (discriminant-range fitting).
32 fn repr_discr<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
40 impl IntegerExt for Integer {
41 fn to_ty<'a, 'tcx>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, signed: bool) -> Ty<'tcx> {
// Width + signedness fully determines the primitive type; all ten
// combinations are enumerated explicitly.
42 match (*self, signed) {
43 (I8, false) => tcx.types.u8,
44 (I16, false) => tcx.types.u16,
45 (I32, false) => tcx.types.u32,
46 (I64, false) => tcx.types.u64,
47 (I128, false) => tcx.types.u128,
48 (I8, true) => tcx.types.i8,
49 (I16, true) => tcx.types.i16,
50 (I32, true) => tcx.types.i32,
51 (I64, true) => tcx.types.i64,
52 (I128, true) => tcx.types.i128,
56 /// Gets the Integer type from an attr::IntType.
57 fn from_attr<C: HasDataLayout>(cx: &C, ity: attr::IntType) -> Integer {
58 let dl = cx.data_layout();
// NOTE(review): the `match ity { ... }` opener is elided in this excerpt;
// signedness is dropped here — only the bit width matters for `Integer`.
61 attr::SignedInt(IntTy::I8) | attr::UnsignedInt(UintTy::U8) => I8,
62 attr::SignedInt(IntTy::I16) | attr::UnsignedInt(UintTy::U16) => I16,
63 attr::SignedInt(IntTy::I32) | attr::UnsignedInt(UintTy::U32) => I32,
64 attr::SignedInt(IntTy::I64) | attr::UnsignedInt(UintTy::U64) => I64,
65 attr::SignedInt(IntTy::I128) | attr::UnsignedInt(UintTy::U128) => I128,
66 attr::SignedInt(IntTy::Isize) | attr::UnsignedInt(UintTy::Usize) => {
// isize/usize resolve to the target's pointer-sized integer.
67 dl.ptr_sized_integer()
72 /// Finds the appropriate Integer type and signedness for the given
73 /// signed discriminant range and #[repr] attribute.
74 /// N.B.: u128 values above i128::MAX will be treated as signed, but
75 /// that shouldn't affect anything, other than maybe debuginfo.
76 fn repr_discr<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
82 // Theoretically, negative values could be larger in unsigned representation
83 // than the unsigned representation of the signed minimum. However, if there
84 // are any negative values, the only valid unsigned representation is u128
85 // which can fit all i128 values, so the result remains unaffected.
86 let unsigned_fit = Integer::fit_unsigned(cmp::max(min as u128, max as u128));
87 let signed_fit = cmp::max(Integer::fit_signed(min), Integer::fit_signed(max));
89 let mut min_from_extern = None;
// An explicit `#[repr(intN)]` hint takes priority; it is a compiler bug
// (ICE) if the hinted type cannot hold the computed discriminant range,
// since typeck should have rejected that earlier.
92 if let Some(ity) = repr.int {
93 let discr = Integer::from_attr(&tcx, ity);
94 let fit = if ity.is_signed() { signed_fit } else { unsigned_fit };
96 bug!("Integer::repr_discr: `#[repr]` hint too small for \
97 discriminant range of enum `{}", ty)
99 return (discr, ity.is_signed());
// NOTE(review): presumably the `repr.c()` / extern-ABI branch (elided
// guard); both arms currently resolve to I32, the arch match exists only
// to document the ARM EABI caveat below.
103 match &tcx.sess.target.target.arch[..] {
104 // WARNING: the ARM EABI has two variants; the one corresponding
105 // to `at_least == I32` appears to be used on Linux and NetBSD,
106 // but some systems may use the variant corresponding to no
107 // lower bound. However, we don't run on those yet...?
108 "arm" => min_from_extern = Some(I32),
109 _ => min_from_extern = Some(I32),
113 let at_least = min_from_extern.unwrap_or(min_default);
115 // If there are no negative values, we can use the unsigned fit.
117 (cmp::max(unsigned_fit, at_least), false)
119 (cmp::max(signed_fit, at_least), true)
// Extension trait adding a type-conversion helper to `rustc_target::abi::Primitive`.
124 pub trait PrimitiveExt {
// Converts this primitive (int/float/pointer) to a corresponding `Ty`.
125 fn to_ty<'a, 'tcx>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>) -> Ty<'tcx>;
128 impl PrimitiveExt for Primitive {
129 fn to_ty<'a, 'tcx>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>) -> Ty<'tcx> {
131 Int(i, signed) => i.to_ty(tcx, signed),
132 Float(FloatTy::F32) => tcx.types.f32,
133 Float(FloatTy::F64) => tcx.types.f64,
// `Pointer` has no unique source type; `*mut ()` is used as a stand-in
// with the right size/alignment.
134 Pointer => tcx.mk_mut_ptr(tcx.mk_unit()),
// Field indices into the `FieldPlacement` of a fat (two-word) pointer,
// matching the `scalar_pair(data_ptr, metadata)` layout built in
// `layout_raw_uncached`: data word first, metadata word second.
139 /// The first half of a fat pointer.
141 /// - For a trait object, this is the address of the box.
142 /// - For a slice, this is the base address.
143 pub const FAT_PTR_ADDR: usize = 0;
145 /// The second half of a fat pointer.
147 /// - For a trait object, this is the address of the vtable.
148 /// - For a slice, this is the length.
149 pub const FAT_PTR_EXTRA: usize = 1;
// Why a layout could not be computed for a type. Returned by the
// `layout_raw` query and formatted for user-facing diagnostics below.
// NOTE(review): the `Unknown(Ty<'tcx>)` variant (used throughout this file
// and matched in the Display impl) is elided from this excerpt.
151 #[derive(Copy, Clone, Debug, RustcEncodable, RustcDecodable)]
152 pub enum LayoutError<'tcx> {
154 SizeOverflow(Ty<'tcx>)
// Human-readable rendering of layout failures, one message per variant.
157 impl<'tcx> fmt::Display for LayoutError<'tcx> {
158 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
160 LayoutError::Unknown(ty) => {
161 write!(f, "the type `{:?}` has an unknown layout", ty)
163 LayoutError::SizeOverflow(ty) => {
// Emitted when a computed size exceeds the target's object-size bound.
164 write!(f, "the type `{:?}` is too big for the current architecture", ty)
// Provider for the `layout_raw` query: computes a type's layout while
// tracking recursion depth in the thread-local `ImplicitCtxt` so that
// self-referential types abort with an overflow error instead of
// recursing forever.
170 fn layout_raw<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
171 query: ty::ParamEnvAnd<'tcx, Ty<'tcx>>)
172 -> Result<&'tcx LayoutDetails, LayoutError<'tcx>>
174 ty::tls::with_related_context(tcx, move |icx| {
175 let rec_limit = *tcx.sess.recursion_limit.get();
176 let (param_env, ty) = query.into_parts();
// Depth guard: error out once the session recursion limit is exceeded
// (error-reporting call partially elided in this excerpt).
178 if icx.layout_depth > rec_limit {
180 &format!("overflow representing the type `{}`", ty));
183 // Update the ImplicitCtxt to increase the layout_depth
184 let icx = ty::tls::ImplicitCtxt {
185 layout_depth: icx.layout_depth + 1,
// Run the actual computation with the incremented depth installed.
189 ty::tls::enter_context(&icx, |_| {
190 let cx = LayoutCx { tcx, param_env };
191 let layout = cx.layout_raw_uncached(ty);
192 // Type-level uninhabitedness should always imply ABI uninhabitedness.
193 if let Ok(layout) = layout {
194 if ty.conservative_is_privately_uninhabited(tcx) {
195 assert!(layout.abi.is_uninhabited());
// Registers this module's query providers (at least `layout_raw`; the
// struct-update fields are elided in this excerpt) into the global table.
203 pub fn provide(providers: &mut ty::query::Providers<'_>) {
204 *providers = ty::query::Providers {
// Context bundling what layout computation needs: a `tcx`-like handle `C`
// (the `tcx` field is elided in this excerpt) plus the parameter
// environment used for normalization and sizedness checks.
210 pub struct LayoutCx<'tcx, C> {
212 pub param_env: ty::ParamEnv<'tcx>,
215 impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> {
// Computes `LayoutDetails` for a single (monomorphic) type, one `ty.sty`
// case at a time. Called only from the `layout_raw` query provider, which
// handles caching and recursion-depth accounting.
//
// NOTE(review): this excerpt elides many original lines (match openers,
// closing braces, struct-literal fields), so the comments below annotate
// only what is visible here; arm boundaries marked "presumably" should be
// confirmed against the full file.
216 fn layout_raw_uncached(&self, ty: Ty<'tcx>) -> Result<&'tcx LayoutDetails, LayoutError<'tcx>> {
218 let param_env = self.param_env;
219 let dl = self.data_layout();
// Helper: a scalar whose valid range covers the full range of its width.
220 let scalar_unit = |value: Primitive| {
221 let bits = value.size(dl).bits();
222 assert!(bits <= 128);
225 valid_range: 0..=(!0 >> (128 - bits))
// Helper: intern a single-scalar layout.
228 let scalar = |value: Primitive| {
229 tcx.intern_layout(LayoutDetails::scalar(self, scalar_unit(value)))
// Helper: the canonical two-scalar (ScalarPair) layout — `b` is placed at
// `a`'s size rounded up to `b`'s ABI alignment.
231 let scalar_pair = |a: Scalar, b: Scalar| {
232 let b_align = b.value.align(dl);
233 let align = a.value.align(dl).max(b_align).max(dl.aggregate_align);
234 let b_offset = a.value.size(dl).align_to(b_align.abi);
235 let size = (b_offset + b.value.size(dl)).align_to(align.abi);
237 variants: Variants::Single { index: VariantIdx::new(0) },
238 fields: FieldPlacement::Arbitrary {
239 offsets: vec![Size::ZERO, b_offset],
240 memory_index: vec![0, 1]
242 abi: Abi::ScalarPair(a, b),
// Local enum (declaration line elided) describing the kind of
// single-variant struct being laid out.
248 #[derive(Copy, Clone, Debug)]
250 /// A tuple, closure, or univariant which cannot be coerced to unsized.
252 /// A univariant, the last field of which may be coerced to unsized.
254 /// A univariant, but with a prefix of an arbitrary size & alignment (e.g., enum tag).
255 Prefixed(Size, Align),
// Core single-variant layout algorithm: optional field reordering, offset
// assignment, alignment computation, and scalar/scalar-pair ABI selection.
258 let univariant_uninterned = |fields: &[TyLayout<'_>], repr: &ReprOptions, kind| {
259 let packed = repr.packed();
260 if packed && repr.align > 0 {
261 bug!("struct cannot be packed and aligned");
264 let pack = Align::from_bytes(repr.pack as u64).unwrap();
266 let mut align = if packed {
272 let mut sized = true;
273 let mut offsets = vec![Size::ZERO; fields.len()];
// Starts as the identity permutation; may be re-sorted below.
274 let mut inverse_memory_index: Vec<u32> = (0..fields.len() as u32).collect();
// Reordering is allowed unless the repr (e.g. #[repr(C)]) forbids it.
276 let mut optimize = !repr.inhibit_struct_field_reordering_opt();
277 if let StructKind::Prefixed(_, align) = kind {
278 optimize &= align.bytes() == 1;
// A possibly-unsized last field must stay last, so it is excluded from
// the reordering window.
282 let end = if let StructKind::MaybeUnsized = kind {
287 let optimizing = &mut inverse_memory_index[..end];
288 let field_align = |f: &TyLayout<'_>| {
289 if packed { f.align.abi.min(pack) } else { f.align.abi }
292 StructKind::AlwaysSized |
293 StructKind::MaybeUnsized => {
294 optimizing.sort_by_key(|&x| {
295 // Place ZSTs first to avoid "interesting offsets",
296 // especially with only one or two non-ZST fields.
297 let f = &fields[x as usize];
298 (!f.is_zst(), cmp::Reverse(field_align(f)))
301 StructKind::Prefixed(..) => {
302 optimizing.sort_by_key(|&x| field_align(&fields[x as usize]));
307 // inverse_memory_index holds field indices by increasing memory offset.
308 // That is, if field 5 has offset 0, the first element of inverse_memory_index is 5.
309 // We now write field offsets to the corresponding offset slot;
310 // field 5 with offset 0 puts 0 in offsets[5].
311 // At the bottom of this function, we use inverse_memory_index to produce memory_index.
313 let mut offset = Size::ZERO;
// A prefix (e.g. an enum tag) reserves space before any fields.
315 if let StructKind::Prefixed(prefix_size, prefix_align) = kind {
316 let prefix_align = if packed {
317 prefix_align.min(pack)
321 align = align.max(AbiAndPrefAlign::new(prefix_align));
322 offset = prefix_size.align_to(prefix_align);
// Assign each field an offset, walking in chosen memory order.
325 for &i in &inverse_memory_index {
326 let field = fields[i as usize];
328 bug!("univariant: field #{} of `{}` comes after unsized field",
332 if field.is_unsized() {
336 // Invariant: offset < dl.obj_size_bound() <= 1<<61
337 let field_align = if packed {
338 field.align.min(AbiAndPrefAlign::new(pack))
342 offset = offset.align_to(field_align.abi);
343 align = align.max(field_align);
345 debug!("univariant offset: {:?} field: {:#?}", offset, field);
346 offsets[i as usize] = offset;
// Overflowing the target's object size bound is a hard layout error.
348 offset = offset.checked_add(field.size, dl)
349 .ok_or(LayoutError::SizeOverflow(ty))?;
// `#[repr(align(N))]` (guard elided) raises the overall alignment.
353 let repr_align = repr.align as u64;
354 align = align.max(AbiAndPrefAlign::new(Align::from_bytes(repr_align).unwrap()));
355 debug!("univariant repr_align: {:?}", repr_align);
358 debug!("univariant min_size: {:?}", offset);
359 let min_size = offset;
361 // As stated above, inverse_memory_index holds field indices by increasing offset.
362 // This makes it an already-sorted view of the offsets vec.
363 // To invert it, consider:
364 // If field 5 has offset 0, offsets[0] is 5, and memory_index[5] should be 0.
365 // Field 5 would be the first element, so memory_index is i:
366 // Note: if we didn't optimize, it's already right.
368 let mut memory_index;
370 memory_index = vec![0; inverse_memory_index.len()];
372 for i in 0..inverse_memory_index.len() {
373 memory_index[inverse_memory_index[i] as usize] = i as u32;
376 memory_index = inverse_memory_index;
379 let size = min_size.align_to(align.abi);
380 let mut abi = Abi::Aggregate { sized };
382 // Unpack newtype ABIs and find scalar pairs.
383 if sized && size.bytes() > 0 {
384 // All other fields must be ZSTs, and we need them to all start at 0.
385 let mut zst_offsets =
386 offsets.iter().enumerate().filter(|&(i, _)| fields[i].is_zst());
387 if zst_offsets.all(|(_, o)| o.bytes() == 0) {
388 let mut non_zst_fields =
389 fields.iter().enumerate().filter(|&(_, f)| !f.is_zst());
391 match (non_zst_fields.next(), non_zst_fields.next(), non_zst_fields.next()) {
392 // We have exactly one non-ZST field.
393 (Some((i, field)), None, None) => {
394 // Field fills the struct and it has a scalar or scalar pair ABI.
395 if offsets[i].bytes() == 0 &&
396 align.abi == field.align.abi &&
399 // For plain scalars, or vectors of them, we can't unpack
400 // newtypes for `#[repr(C)]`, as that affects C ABIs.
401 Abi::Scalar(_) | Abi::Vector { .. } if optimize => {
402 abi = field.abi.clone();
404 // But scalar pairs are Rust-specific and get
405 // treated as aggregates by C ABIs anyway.
406 Abi::ScalarPair(..) => {
407 abi = field.abi.clone();
414 // Two non-ZST fields, and they're both scalars.
415 (Some((i, &TyLayout {
416 details: &LayoutDetails { abi: Abi::Scalar(ref a), .. }, ..
417 })), Some((j, &TyLayout {
418 details: &LayoutDetails { abi: Abi::Scalar(ref b), .. }, ..
420 // Order by the memory placement, not source order.
421 let ((i, a), (j, b)) = if offsets[i] < offsets[j] {
426 let pair = scalar_pair(a.clone(), b.clone());
427 let pair_offsets = match pair.fields {
428 FieldPlacement::Arbitrary {
432 assert_eq!(memory_index, &[0, 1]);
437 if offsets[i] == pair_offsets[0] &&
438 offsets[j] == pair_offsets[1] &&
439 align == pair.align &&
441 // We can use `ScalarPair` only when it matches our
442 // already computed layout (including `#[repr(C)]`).
// Any uninhabited field makes the whole struct uninhabited.
452 if sized && fields.iter().any(|f| f.abi.is_uninhabited()) {
453 abi = Abi::Uninhabited;
457 variants: Variants::Single { index: VariantIdx::new(0) },
458 fields: FieldPlacement::Arbitrary {
// Interning wrapper around `univariant_uninterned`.
467 let univariant = |fields: &[TyLayout<'_>], repr: &ReprOptions, kind| {
468 Ok(tcx.intern_layout(univariant_uninterned(fields, repr, kind)?))
470 debug_assert!(!ty.has_infer_types());
// Basic scalars — presumably the `bool` arm (its 0..=1 valid range line is
// elided here): a u8-sized scalar ...
475 tcx.intern_layout(LayoutDetails::scalar(self, Scalar {
476 value: Int(I8, false),
// ... and `char`: a u32 restricted to the Unicode scalar-value range.
481 tcx.intern_layout(LayoutDetails::scalar(self, Scalar {
482 value: Int(I32, false),
483 valid_range: 0..=0x10FFFF
// Integer and float primitives map directly to scalars.
487 scalar(Int(Integer::from_attr(dl, attr::SignedInt(ity)), true))
490 scalar(Int(Integer::from_attr(dl, attr::UnsignedInt(ity)), false))
492 ty::Float(fty) => scalar(Float(fty)),
// Thin-pointer arm (presumably fn pointers): non-null pointer scalar.
494 let mut ptr = scalar_unit(Pointer);
495 ptr.valid_range = 1..=*ptr.valid_range.end();
496 tcx.intern_layout(LayoutDetails::scalar(self, ptr))
// Uninhabited arm (presumably `ty::Never`): zero-field union, ABI-level
// uninhabited.
501 tcx.intern_layout(LayoutDetails {
502 variants: Variants::Single { index: VariantIdx::new(0) },
503 fields: FieldPlacement::Union(0),
504 abi: Abi::Uninhabited,
510 // Potentially-fat pointers.
511 ty::Ref(_, pointee, _) |
512 ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
513 let mut data_ptr = scalar_unit(Pointer);
// References are non-null; raw pointers keep the full value range.
514 if !ty.is_unsafe_ptr() {
515 data_ptr.valid_range = 1..=*data_ptr.valid_range.end();
518 let pointee = tcx.normalize_erasing_regions(param_env, pointee);
// A sized pointee means a thin pointer: a single scalar.
519 if pointee.is_sized(tcx.at(DUMMY_SP), param_env) {
520 return Ok(tcx.intern_layout(LayoutDetails::scalar(self, data_ptr)));
// Otherwise the metadata word depends on the unsized tail.
523 let unsized_part = tcx.struct_tail(pointee);
524 let metadata = match unsized_part.sty {
526 return Ok(tcx.intern_layout(LayoutDetails::scalar(self, data_ptr)));
528 ty::Slice(_) | ty::Str => {
// Slice/str metadata is a usize-sized length.
529 scalar_unit(Int(dl.ptr_sized_integer(), false))
// Trait-object arm (header elided): metadata is a non-null vtable pointer.
532 let mut vtable = scalar_unit(Pointer);
533 vtable.valid_range = 1..=*vtable.valid_range.end();
536 _ => return Err(LayoutError::Unknown(unsized_part))
539 // Effectively a (ptr, meta) tuple.
540 tcx.intern_layout(scalar_pair(data_ptr, metadata))
543 // Arrays and slices.
544 ty::Array(element, mut count) => {
// Try to normalize a still-projected length before giving up.
545 if count.has_projections() {
546 count = tcx.normalize_erasing_regions(param_env, count);
547 if count.has_projections() {
548 return Err(LayoutError::Unknown(ty));
552 let element = self.layout_of(element)?;
553 let count = count.unwrap_usize(tcx);
554 let size = element.size.checked_mul(count, dl)
555 .ok_or(LayoutError::SizeOverflow(ty))?;
// A non-empty array of an uninhabited element is itself uninhabited.
557 let abi = if count != 0 && ty.conservative_is_privately_uninhabited(tcx) {
560 Abi::Aggregate { sized: true }
563 tcx.intern_layout(LayoutDetails {
564 variants: Variants::Single { index: VariantIdx::new(0) },
565 fields: FieldPlacement::Array {
566 stride: element.size,
570 align: element.align,
// Slices: same array placement but unsized (size/count fields elided).
574 ty::Slice(element) => {
575 let element = self.layout_of(element)?;
576 tcx.intern_layout(LayoutDetails {
577 variants: Variants::Single { index: VariantIdx::new(0) },
578 fields: FieldPlacement::Array {
579 stride: element.size,
582 abi: Abi::Aggregate { sized: false },
583 align: element.align,
// `str` arm (header elided): an unsized array of bytes, stride 1.
588 tcx.intern_layout(LayoutDetails {
589 variants: Variants::Single { index: VariantIdx::new(0) },
590 fields: FieldPlacement::Array {
591 stride: Size::from_bytes(1),
594 abi: Abi::Aggregate { sized: false },
// Odds and ends — unit-like arm (header elided): empty univariant struct.
602 univariant(&[], &ReprOptions::default(), StructKind::AlwaysSized)?
604 ty::Dynamic(..) | ty::Foreign(..) => {
605 let mut unit = univariant_uninterned(&[], &ReprOptions::default(),
606 StructKind::AlwaysSized)?;
// Trait objects / foreign types are unsized: patch the empty layout.
608 Abi::Aggregate { ref mut sized } => *sized = false,
611 tcx.intern_layout(unit)
614 ty::Generator(def_id, ref substs, _) => {
615 // FIXME(tmandry): For fields that are repeated in multiple
616 // variants in the GeneratorLayout, we need code to ensure that
617 // the offset of these fields never change. Right now this is
618 // not an issue since every variant has every field, but once we
619 // optimize this we have to be more careful.
// Generators: a common prefix (upvars + discriminant scalar) followed by
// per-suspend-state variants laid out after that prefix.
621 let discr_index = substs.prefix_tys(def_id, tcx).count();
622 let prefix_tys = substs.prefix_tys(def_id, tcx)
623 .chain(iter::once(substs.discr_ty(tcx)));
624 let prefix = univariant_uninterned(
625 &prefix_tys.map(|ty| self.layout_of(ty)).collect::<Result<Vec<_>, _>>()?,
626 &ReprOptions::default(),
627 StructKind::AlwaysSized)?;
629 let mut size = prefix.size;
630 let mut align = prefix.align;
631 let variants_tys = substs.state_tys(def_id, tcx);
632 let variants = variants_tys.enumerate().map(|(i, variant_tys)| {
633 let mut variant = univariant_uninterned(
634 &variant_tys.map(|ty| self.layout_of(ty)).collect::<Result<Vec<_>, _>>()?,
635 &ReprOptions::default(),
636 StructKind::Prefixed(prefix.size, prefix.align.abi))?;
638 variant.variants = Variants::Single { index: VariantIdx::new(i) };
// Overall size/align is the max over all state variants.
640 size = size.max(variant.size);
641 align = align.max(variant.align);
644 }).collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
646 let abi = if prefix.abi.is_uninhabited() ||
647 variants.iter().all(|v| v.abi.is_uninhabited()) {
650 Abi::Aggregate { sized: true }
652 let discr = match &self.layout_of(substs.discr_ty(tcx))?.abi {
653 Abi::Scalar(s) => s.clone(),
657 let layout = tcx.intern_layout(LayoutDetails {
658 variants: Variants::Multiple {
660 discr_kind: DiscriminantKind::Tag,
664 fields: prefix.fields,
669 debug!("generator layout: {:#?}", layout);
// Closures: a univariant struct of the captured upvars.
673 ty::Closure(def_id, ref substs) => {
674 let tys = substs.upvar_tys(def_id, tcx);
675 univariant(&tys.map(|ty| self.layout_of(ty)).collect::<Result<Vec<_>, _>>()?,
676 &ReprOptions::default(),
677 StructKind::AlwaysSized)?
// Tuple arm (header elided): only the last element may be unsized.
681 let kind = if tys.len() == 0 {
682 StructKind::AlwaysSized
684 StructKind::MaybeUnsized
687 univariant(&tys.iter().map(|k| {
688 self.layout_of(k.expect_ty())
689 }).collect::<Result<Vec<_>, _>>()?, &ReprOptions::default(), kind)?
692 // SIMD vector types.
693 ty::Adt(def, ..) if def.repr.simd() => {
694 let element = self.layout_of(ty.simd_type(tcx))?;
695 let count = ty.simd_size(tcx) as u64;
// SIMD lanes must themselves be machine scalars; otherwise this is a
// monomorphization-time fatal error (not an ICE).
697 let scalar = match element.abi {
698 Abi::Scalar(ref scalar) => scalar.clone(),
700 tcx.sess.fatal(&format!("monomorphising SIMD type `{}` with \
701 a non-machine element type `{}`",
705 let size = element.size.checked_mul(count, dl)
706 .ok_or(LayoutError::SizeOverflow(ty))?;
707 let align = dl.vector_align(size);
708 let size = size.align_to(align.abi);
710 tcx.intern_layout(LayoutDetails {
711 variants: Variants::Single { index: VariantIdx::new(0) },
712 fields: FieldPlacement::Array {
713 stride: element.size,
726 ty::Adt(def, substs) => {
727 // Cache the field layouts.
728 let variants = def.variants.iter().map(|v| {
729 v.fields.iter().map(|field| {
730 self.layout_of(field.ty(tcx, substs))
731 }).collect::<Result<Vec<_>, _>>()
732 }).collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
// Union handling (the `if def.is_union()` guard is elided here): every
// field sits at offset 0; size is the max field size.
735 let packed = def.repr.packed();
736 if packed && def.repr.align > 0 {
737 bug!("Union cannot be packed and aligned");
740 let pack = Align::from_bytes(def.repr.pack as u64).unwrap();
742 let mut align = if packed {
748 if def.repr.align > 0 {
749 let repr_align = def.repr.align as u64;
751 AbiAndPrefAlign::new(Align::from_bytes(repr_align).unwrap()));
754 let optimize = !def.repr.inhibit_union_abi_opt();
755 let mut size = Size::ZERO;
756 let mut abi = Abi::Aggregate { sized: true };
757 let index = VariantIdx::new(0);
758 for field in &variants[index] {
759 assert!(!field.is_unsized());
761 let field_align = if packed {
762 field.align.min(AbiAndPrefAlign::new(pack))
766 align = align.max(field_align);
768 // If all non-ZST fields have the same ABI, forward this ABI
769 if optimize && !field.is_zst() {
770 // Normalize scalar_unit to the maximal valid range
771 let field_abi = match &field.abi {
772 Abi::Scalar(x) => Abi::Scalar(scalar_unit(x.value)),
773 Abi::ScalarPair(x, y) => {
775 scalar_unit(x.value),
776 scalar_unit(y.value),
779 Abi::Vector { element: x, count } => {
781 element: scalar_unit(x.value),
786 Abi::Aggregate { .. } => Abi::Aggregate { sized: true },
789 if size == Size::ZERO {
790 // first non ZST: initialize 'abi'
792 } else if abi != field_abi {
793 // different fields have different ABI: reset to Aggregate
794 abi = Abi::Aggregate { sized: true };
798 size = cmp::max(size, field.size);
801 return Ok(tcx.intern_layout(LayoutDetails {
802 variants: Variants::Single { index },
803 fields: FieldPlacement::Union(variants[index].len()),
806 size: size.align_to(align.abi)
810 // A variant is absent if it's uninhabited and only has ZST fields.
811 // Present uninhabited variants only require space for their fields,
812 // but *not* an encoding of the discriminant (e.g., a tag value).
813 // See issue #49298 for more details on the need to leave space
814 // for non-ZST uninhabited data (mostly partial initialization).
815 let absent = |fields: &[TyLayout<'_>]| {
816 let uninhabited = fields.iter().any(|f| f.abi.is_uninhabited());
817 let is_zst = fields.iter().all(|f| f.is_zst());
818 uninhabited && is_zst
820 let (present_first, present_second) = {
821 let mut present_variants = variants.iter_enumerated().filter_map(|(i, v)| {
828 (present_variants.next(), present_variants.next())
830 if present_first.is_none() {
831 // Uninhabited because it has no variants, or only absent ones.
832 return tcx.layout_raw(param_env.and(tcx.types.never));
835 let is_struct = !def.is_enum() ||
836 // Only one variant is present.
837 (present_second.is_none() &&
838 // Representation optimizations are allowed.
839 !def.repr.inhibit_enum_layout_opt());
841 // Struct, or univariant enum equivalent to a struct.
842 // (Typechecking will reject discriminant-sizing attrs.)
844 let v = present_first.unwrap();
845 let kind = if def.is_enum() || variants[v].len() == 0 {
846 StructKind::AlwaysSized
// A struct whose last field may be unsized gets MaybeUnsized so the
// tail can be coerced (e.g. to a slice or trait object).
848 let param_env = tcx.param_env(def.did);
849 let last_field = def.variants[v].fields.last().unwrap();
850 let always_sized = tcx.type_of(last_field.did)
851 .is_sized(tcx.at(DUMMY_SP), param_env);
852 if !always_sized { StructKind::MaybeUnsized }
853 else { StructKind::AlwaysSized }
856 let mut st = univariant_uninterned(&variants[v], &def.repr, kind)?;
857 st.variants = Variants::Single { index: v };
// Apply `#[rustc_layout_scalar_valid_range_*]` (e.g. NonNull/NonZero)
// to narrow the scalar's valid range.
858 let (start, end) = self.tcx.layout_scalar_valid_range(def.did);
860 Abi::Scalar(ref mut scalar) |
861 Abi::ScalarPair(ref mut scalar, _) => {
862 // the asserts ensure that we are not using the
863 // `#[rustc_layout_scalar_valid_range(n)]`
864 // attribute to widen the range of anything as that would probably
865 // result in UB somewhere
866 if let Bound::Included(start) = start {
867 assert!(*scalar.valid_range.start() <= start);
868 scalar.valid_range = start..=*scalar.valid_range.end();
870 if let Bound::Included(end) = end {
871 assert!(*scalar.valid_range.end() >= end);
872 scalar.valid_range = *scalar.valid_range.start()..=end;
876 start == Bound::Unbounded && end == Bound::Unbounded,
877 "nonscalar layout for layout_scalar_valid_range type {:?}: {:#?}",
882 return Ok(tcx.intern_layout(st));
885 // The current code for niche-filling relies on variant indices
886 // instead of actual discriminants, so dataful enums with
887 // explicit discriminants (RFC #2363) would misbehave.
888 let no_explicit_discriminants = def.variants.iter_enumerated()
889 .all(|(i, v)| v.discr == ty::VariantDiscr::Relative(i.as_u32()));
891 // Niche-filling enum optimization.
892 if !def.repr.inhibit_enum_layout_opt() && no_explicit_discriminants {
893 let mut dataful_variant = None;
// Deliberately-empty reverse range; widened as niche variants are found.
894 let mut niche_variants = VariantIdx::MAX..=VariantIdx::new(0);
896 // Find one non-ZST variant.
897 'variants: for (v, fields) in variants.iter_enumerated() {
903 if dataful_variant.is_none() {
904 dataful_variant = Some(v);
// Two dataful variants: the optimization does not apply.
907 dataful_variant = None;
912 niche_variants = *niche_variants.start().min(&v)..=v;
915 if niche_variants.start() > niche_variants.end() {
916 dataful_variant = None;
919 if let Some(i) = dataful_variant {
921 niche_variants.end().as_u32() - niche_variants.start().as_u32() + 1
// Search the dataful variant's fields for a niche (spare bit-pattern
// range) large enough to encode `count` dataless variants.
923 for (field_index, &field) in variants[i].iter().enumerate() {
924 let niche = match self.find_niche(field)? {
925 Some(niche) => niche,
928 let (niche_start, niche_scalar) = match niche.reserve(self, count) {
933 let mut align = dl.aggregate_align;
934 let st = variants.iter_enumerated().map(|(j, v)| {
935 let mut st = univariant_uninterned(v,
936 &def.repr, StructKind::AlwaysSized)?;
937 st.variants = Variants::Single { index: j };
939 align = align.max(st.align);
942 }).collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
944 let offset = st[i].fields.offset(field_index) + niche.offset;
945 let size = st[i].size;
947 let mut abi = match st[i].abi {
948 Abi::Scalar(_) => Abi::Scalar(niche_scalar.clone()),
949 Abi::ScalarPair(ref first, ref second) => {
950 // We need to use scalar_unit to reset the
951 // valid range to the maximal one for that
952 // primitive, because only the niche is
953 // guaranteed to be initialised, not the
955 if offset.bytes() == 0 {
957 niche_scalar.clone(),
958 scalar_unit(second.value),
962 scalar_unit(first.value),
963 niche_scalar.clone(),
967 _ => Abi::Aggregate { sized: true },
970 if st.iter().all(|v| v.abi.is_uninhabited()) {
971 abi = Abi::Uninhabited;
974 return Ok(tcx.intern_layout(LayoutDetails {
975 variants: Variants::Multiple {
977 discr_kind: DiscriminantKind::Niche {
985 fields: FieldPlacement::Arbitrary {
986 offsets: vec![offset],
987 memory_index: vec![0]
// Tagged-enum fallback: compute the discriminant range over inhabited
// variants only, then pick the smallest viable tag integer.
997 let (mut min, mut max) = (i128::max_value(), i128::min_value());
998 let discr_type = def.repr.discr_type();
999 let bits = Integer::from_attr(self, discr_type).size().bits();
1000 for (i, discr) in def.discriminants(tcx) {
1001 if variants[i].iter().any(|f| f.abi.is_uninhabited()) {
1004 let mut x = discr.val as i128;
1005 if discr_type.is_signed() {
1006 // sign extend the raw representation to be an i128
1007 x = (x << (128 - bits)) >> (128 - bits);
1009 if x < min { min = x; }
1010 if x > max { max = x; }
1012 // We might have no inhabited variants, so pretend there's at least one.
1013 if (min, max) == (i128::max_value(), i128::min_value()) {
1017 assert!(min <= max, "discriminant range is {}...{}", min, max);
1018 let (min_ity, signed) = Integer::repr_discr(tcx, ty, &def.repr, min, max);
1020 let mut align = dl.aggregate_align;
1021 let mut size = Size::ZERO;
1023 // We're interested in the smallest alignment, so start large.
1024 let mut start_align = Align::from_bytes(256).unwrap();
1025 assert_eq!(Integer::for_align(dl, start_align), None);
1027 // repr(C) on an enum tells us to make a (tag, union) layout,
1028 // so we need to grow the prefix alignment to be at least
1029 // the alignment of the union. (This value is used both for
1030 // determining the alignment of the overall enum, and the
1031 // determining the alignment of the payload after the tag.)
1032 let mut prefix_align = min_ity.align(dl).abi;
1034 for fields in &variants {
1035 for field in fields {
1036 prefix_align = prefix_align.max(field.align.abi);
1041 // Create the set of structs that represent each variant.
1042 let mut layout_variants = variants.iter_enumerated().map(|(i, field_layouts)| {
1043 let mut st = univariant_uninterned(&field_layouts,
1044 &def.repr, StructKind::Prefixed(min_ity.size(), prefix_align))?;
1045 st.variants = Variants::Single { index: i };
1046 // Find the first field we can't move later
1047 // to make room for a larger discriminant.
1048 for field in st.fields.index_by_increasing_offset().map(|j| field_layouts[j]) {
1049 if !field.is_zst() || field.align.abi.bytes() != 1 {
1050 start_align = start_align.min(field.align.abi);
1054 size = cmp::max(size, st.size);
1055 align = align.max(st.align);
1057 }).collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
1059 // Align the maximum variant size to the largest alignment.
1060 size = size.align_to(align.abi);
1062 if size.bytes() >= dl.obj_size_bound() {
1063 return Err(LayoutError::SizeOverflow(ty));
1066 let typeck_ity = Integer::from_attr(dl, def.repr.discr_type());
1067 if typeck_ity < min_ity {
1068 // It is a bug if Layout decided on a greater discriminant size than typeck for
1069 // some reason at this point (based on values discriminant can take on). Mostly
1070 // because this discriminant will be loaded, and then stored into variable of
1071 // type calculated by typeck. Consider such case (a bug): typeck decided on
1072 // byte-sized discriminant, but layout thinks we need a 16-bit to store all
1073 // discriminant values. That would be a bug, because then, in codegen, in order
1074 // to store this 16-bit discriminant into 8-bit sized temporary some of the
1075 // space necessary to represent would have to be discarded (or layout is wrong
1076 // on thinking it needs 16 bits)
1077 bug!("layout decided on a larger discriminant type ({:?}) than typeck ({:?})",
1078 min_ity, typeck_ity);
1079 // However, it is fine to make discr type however large (as an optimisation)
1080 // after this point – we’ll just truncate the value we load in codegen.
1083 // Check to see if we should use a different type for the
1084 // discriminant. We can safely use a type with the same size
1085 // as the alignment of the first field of each variant.
1086 // We increase the size of the discriminant to avoid LLVM copying
1087 // padding when it doesn't need to. This normally causes unaligned
1088 // load/stores and excessive memcpy/memset operations. By using a
1089 // bigger integer size, LLVM can be sure about its contents and
1090 // won't be so conservative.
1092 // Use the initial field alignment
1093 let mut ity = if def.repr.c() || def.repr.int.is_some() {
1096 Integer::for_align(dl, start_align).unwrap_or(min_ity)
1099 // If the alignment is not larger than the chosen discriminant size,
1100 // don't use the alignment as the final size.
1104 // Patch up the variants' first few fields.
1105 let old_ity_size = min_ity.size();
1106 let new_ity_size = ity.size();
1107 for variant in &mut layout_variants {
1108 match variant.fields {
1109 FieldPlacement::Arbitrary { ref mut offsets, .. } => {
1111 if *i <= old_ity_size {
1112 assert_eq!(*i, old_ity_size);
1116 // We might be making the struct larger.
1117 if variant.size <= old_ity_size {
1118 variant.size = new_ity_size;
// Tag scalar: valid range is the discriminant range masked to tag width.
1126 let tag_mask = !0u128 >> (128 - ity.size().bits());
1128 value: Int(ity, signed),
1129 valid_range: (min as u128 & tag_mask)..=(max as u128 & tag_mask),
1131 let mut abi = Abi::Aggregate { sized: true };
1132 if tag.value.size(dl) == size {
1133 abi = Abi::Scalar(tag.clone());
1135 // Try to use a ScalarPair for all tagged enums.
1136 let mut common_prim = None;
1137 for (field_layouts, layout_variant) in variants.iter().zip(&layout_variants) {
1138 let offsets = match layout_variant.fields {
1139 FieldPlacement::Arbitrary { ref offsets, .. } => offsets,
1142 let mut fields = field_layouts
1145 .filter(|p| !p.0.is_zst());
1146 let (field, offset) = match (fields.next(), fields.next()) {
1147 (None, None) => continue,
1148 (Some(pair), None) => pair,
1154 let prim = match field.details.abi {
1155 Abi::Scalar(ref scalar) => scalar.value,
1161 if let Some(pair) = common_prim {
1162 // This is pretty conservative. We could go fancier
1163 // by conflating things like i32 and u32, or even
1164 // realising that (u8, u8) could just cohabit with
1166 if pair != (prim, offset) {
1171 common_prim = Some((prim, offset));
1174 if let Some((prim, offset)) = common_prim {
1175 let pair = scalar_pair(tag.clone(), scalar_unit(prim));
1176 let pair_offsets = match pair.fields {
1177 FieldPlacement::Arbitrary {
1181 assert_eq!(memory_index, &[0, 1]);
1186 if pair_offsets[0] == Size::ZERO &&
1187 pair_offsets[1] == *offset &&
1188 align == pair.align &&
1190 // We can use `ScalarPair` only when it matches our
1191 // already computed layout (including `#[repr(C)]`).
1197 if layout_variants.iter().all(|v| v.abi.is_uninhabited()) {
1198 abi = Abi::Uninhabited;
1201 tcx.intern_layout(LayoutDetails {
1202 variants: Variants::Multiple {
1204 discr_kind: DiscriminantKind::Tag,
1206 variants: layout_variants,
1208 fields: FieldPlacement::Arbitrary {
1209 offsets: vec![Size::ZERO],
1210 memory_index: vec![0]
1218 // Types with no meaningful known layout.
1219 ty::Projection(_) | ty::Opaque(..) => {
// If normalization makes no progress, the layout is unknowable here.
1220 let normalized = tcx.normalize_erasing_regions(param_env, ty);
1221 if ty == normalized {
1222 return Err(LayoutError::Unknown(ty));
1224 tcx.layout_raw(param_env.and(normalized))?
// These type kinds should never reach layout computation: ICE.
1228 ty::Placeholder(..) |
1229 ty::UnnormalizedProjection(..) |
1230 ty::GeneratorWitness(..) |
1232 bug!("LayoutDetails::compute: unexpected type `{}`", ty)
// Generic parameters / prior type errors: recoverable unknown layout.
1235 ty::Param(_) | ty::Error => {
1236 return Err(LayoutError::Unknown(ty));
1241 /// This is invoked by the `layout_raw` query to record the final
1242 /// layout of each type.
// Cheap guard: only does work when `-Zprint-type-sizes` is enabled; the
// actual recording lives in `record_layout_for_printing_outlined` so the
// common (flag off) path is a single branch.
1244 fn record_layout_for_printing(&self, layout: TyLayout<'tcx>) {
1245 // If we are running with `-Zprint-type-sizes`, maybe record layouts
1246 // for dumping later.
1247 if self.tcx.sess.opts.debugging_opts.print_type_sizes {
1248 self.record_layout_for_printing_outlined(layout)
// Slow path of `-Zprint-type-sizes` recording: builds `session::VariantInfo`
// / `session::FieldInfo` descriptions of `layout` and feeds them into
// `sess.code_stats`. NOTE(review): interior lines appear elided in this
// extract; comments below describe only the visible code.
1252 fn record_layout_for_printing_outlined(&self, layout: TyLayout<'tcx>) {
1253 // Ignore layouts that are done with non-empty environments or
1254 // non-monomorphic layouts, as the user only wants to see the stuff
1255 // resulting from the final codegen session.
1256 if
1257 layout.ty.has_param_types() ||
1258 layout.ty.has_self_ty() ||
1259 !self.param_env.caller_bounds.is_empty()
1264 // (delay format until we actually need it)
1265 let record = |kind, packed, opt_discr_size, variants| {
1266 let type_desc = format!("{:?}", layout.ty);
1267 self.tcx.sess.code_stats.borrow_mut().record_type_size(kind,
// Only nominal types (ADTs) and closures are recorded; everything else is
// skipped with a debug! trace.
1276 let adt_def = match layout.ty.sty {
1277 ty::Adt(ref adt_def, _) => {
1278 debug!("print-type-size t: `{:?}` process adt", layout.ty);
1282 ty::Closure(..) => {
1283 debug!("print-type-size t: `{:?}` record closure", layout.ty);
1284 record(DataTypeKind::Closure, false, None, vec![]);
1289 debug!("print-type-size t: `{:?}` skip non-nominal", layout.ty);
1294 let adt_kind = adt_def.adt_kind();
1295 let adt_packed = adt_def.repr.packed();
// Closure that turns one variant's field names + layout into a
// `session::VariantInfo`, tracking the furthest field end as `min_size`.
1297 let build_variant_info = |n: Option<Ident>,
1299 layout: TyLayout<'tcx>| {
1300 let mut min_size = Size::ZERO;
1301 let field_info: Vec<_> = flds.iter().enumerate().map(|(i, &name)| {
1302 match layout.field(self, i) {
1304 bug!("no layout found for field {}: `{:?}`", name, err);
1306 Ok(field_layout) => {
1307 let offset = layout.fields.offset(i);
1308 let field_end = offset + field_layout.size;
1309 if min_size < field_end {
1310 min_size = field_end;
1312 session::FieldInfo {
1313 name: name.to_string(),
1314 offset: offset.bytes(),
1315 size: field_layout.size.bytes(),
1316 align: field_layout.align.abi.bytes(),
// Unsized layouts only have a lower bound on their size.
1322 session::VariantInfo {
1323 name: n.map(|n| n.to_string()),
1324 kind: if layout.is_unsized() {
1325 session::SizeKind::Min
1327 session::SizeKind::Exact
1329 align: layout.align.abi.bytes(),
1330 size: if min_size.bytes() == 0 {
// Structs/univariant types record one variant; empty enums record none.
1339 match layout.variants {
1340 Variants::Single { index } => {
1341 debug!("print-type-size `{:#?}` variant {}",
1342 layout, adt_def.variants[index].ident);
1343 if !adt_def.variants.is_empty() {
1344 let variant_def = &adt_def.variants[index];
1345 let fields: Vec<_> =
1346 variant_def.fields.iter().map(|f| f.ident.name).collect();
1347 record(adt_kind.into(),
1350 vec![build_variant_info(Some(variant_def.ident),
1354 // (This case arises for *empty* enums; so give it
1356 record(adt_kind.into(), adt_packed, None, vec![]);
// Multi-variant enums: describe every variant, and report the tag size
// only for directly-tagged enums (`DiscriminantKind::Tag`).
1360 Variants::Multiple { ref discr, ref discr_kind, .. } => {
1361 debug!("print-type-size `{:#?}` adt general variants def {}",
1362 layout.ty, adt_def.variants.len());
1363 let variant_infos: Vec<_> =
1364 adt_def.variants.iter_enumerated().map(|(i, variant_def)| {
1365 let fields: Vec<_> =
1366 variant_def.fields.iter().map(|f| f.ident.name).collect();
1367 build_variant_info(Some(variant_def.ident),
1369 layout.for_variant(self, i))
1372 record(adt_kind.into(), adt_packed, match discr_kind {
1373 DiscriminantKind::Tag => Some(discr.value.size(self)),
1381 /// Type size "skeleton", i.e., the only information determining a type's size.
1382 /// While this is conservative, (aside from constant sizes, only pointers,
1383 /// newtypes thereof and null pointer optimized enums are allowed), it is
1384 /// enough to statically check common use cases of transmute.
1385 #[derive(Copy, Clone, Debug)]
1386 pub enum SizeSkeleton<'tcx> {
1387 /// Any statically computable Layout.
// (Known carries a `Size` — its payload line is elided in this extract.)
1390 /// A potentially-fat pointer.
1392 /// If true, this pointer is never null.
1394 /// The type which determines the unsized metadata, if any,
1395 /// of this pointer. Either a type parameter or a projection
1396 /// depending on one, with regions erased.
1401 impl<'a, 'tcx> SizeSkeleton<'tcx> {
// Tries a full static layout first; on failure, recognizes the few shapes
// whose size is still determined (thin/fat pointers, pointer newtypes, and
// nullable-pointer-optimized two-variant enums). NOTE(review): interior
// lines appear elided in this extract.
1402 pub fn compute(ty: Ty<'tcx>,
1403 tcx: TyCtxt<'a, 'tcx, 'tcx>,
1404 param_env: ty::ParamEnv<'tcx>)
1405 -> Result<SizeSkeleton<'tcx>, LayoutError<'tcx>> {
1406 debug_assert!(!ty.has_infer_types());
1408 // First try computing a static layout.
1409 let err = match tcx.layout_of(param_env.and(ty)) {
1411 return Ok(SizeSkeleton::Known(layout.size));
// References and raw pointers: the layout error is acceptable only when
// the pointee tail is a type parameter or projection (unknown metadata).
1417 ty::Ref(_, pointee, _) |
1418 ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
1419 let non_zero = !ty.is_unsafe_ptr();
1420 let tail = tcx.struct_tail(pointee);
1422 ty::Param(_) | ty::Projection(_) => {
1423 debug_assert!(tail.has_param_types() || tail.has_self_ty());
1424 Ok(SizeSkeleton::Pointer {
1426 tail: tcx.erase_regions(&tail)
1430 bug!("SizeSkeleton::compute({}): layout errored ({}), yet \
1431 tail `{}` is not a type parameter or a projection",
1437 ty::Adt(def, substs) => {
1438 // Only newtypes and enums w/ nullable pointer optimization.
1439 if def.is_union() || def.variants.is_empty() || def.variants.len() > 2 {
1443 // Get a zero-sized variant or a pointer newtype.
1444 let zero_or_ptr_variant = |i| {
1445 let i = VariantIdx::new(i);
1446 let fields = def.variants[i].fields.iter().map(|field| {
1447 SizeSkeleton::compute(field.ty(tcx, substs), tcx, param_env)
1450 for field in fields {
1453 SizeSkeleton::Known(size) => {
1454 if size.bytes() > 0 {
1458 SizeSkeleton::Pointer {..} => {
1469 let v0 = zero_or_ptr_variant(0)?;
// Newtype case: a single variant wrapping exactly one pointer. The
// wrapper may further restrict the valid range (`layout_scalar_valid_range`)
// and thereby make the pointer non-null (e.g. NonNull-style types).
1471 if def.variants.len() == 1 {
1472 if let Some(SizeSkeleton::Pointer { non_zero, tail }) = v0 {
1473 return Ok(SizeSkeleton::Pointer {
1474 non_zero: non_zero || match tcx.layout_scalar_valid_range(def.did) {
1475 (Bound::Included(start), Bound::Unbounded) => start > 0,
1476 (Bound::Included(start), Bound::Included(end)) =>
1477 0 < start && start < end,
1487 let v1 = zero_or_ptr_variant(1)?;
1488 // Nullable pointer enum optimization.
// One variant is a never-null pointer, the other is zero-sized: the enum
// is pointer-sized but the combined pointer may now be null.
1490 (Some(SizeSkeleton::Pointer { non_zero: true, tail }), None) |
1491 (None, Some(SizeSkeleton::Pointer { non_zero: true, tail })) => {
1492 Ok(SizeSkeleton::Pointer {
// Projections/opaques: normalize and retry, erroring if normalization
// made no progress (would recurse forever otherwise).
1501 ty::Projection(_) | ty::Opaque(..) => {
1502 let normalized = tcx.normalize_erasing_regions(param_env, ty);
1503 if ty == normalized {
1506 SizeSkeleton::compute(normalized, tcx, param_env)
// Two skeletons have the same size iff both are the same Known size, or
// both are pointers whose tails (metadata determinants) are equal.
1514 pub fn same_size(self, other: SizeSkeleton<'_>) -> bool {
1515 match (self, other) {
1516 (SizeSkeleton::Known(a), SizeSkeleton::Known(b)) => a == b,
1517 (SizeSkeleton::Pointer { tail: a, .. },
1518 SizeSkeleton::Pointer { tail: b, .. }) => a == b,
// Capability trait: any layout context that can hand out a `TyCtxt`
// (and, via the supertrait, a target data layout).
1524 pub trait HasTyCtxt<'tcx>: HasDataLayout {
1525 fn tcx<'a>(&'a self) -> TyCtxt<'a, 'tcx, 'tcx>;
// Capability trait: any layout context that carries a `ParamEnv`.
1528 pub trait HasParamEnv<'tcx> {
1529 fn param_env(&self) -> ty::ParamEnv<'tcx>;
// `TyCtxt` itself can serve as a data-layout provider.
// (Method body elided in this extract; presumably returns the target's
// `TargetDataLayout` — TODO confirm against the full file.)
1532 impl<'a, 'gcx, 'tcx> HasDataLayout for TyCtxt<'a, 'gcx, 'tcx> {
1533 fn data_layout(&self) -> &TargetDataLayout {
// `TyCtxt` trivially provides itself, narrowed to the global lifetime.
// (Method body elided in this extract.)
1538 impl<'a, 'gcx, 'tcx> HasTyCtxt<'gcx> for TyCtxt<'a, 'gcx, 'tcx> {
1539 fn tcx<'b>(&'b self) -> TyCtxt<'b, 'gcx, 'gcx> {
// `LayoutCx` exposes its stored `ParamEnv` to generic layout code.
// (Method body elided in this extract; presumably returns `self.param_env`.)
1544 impl<'tcx, C> HasParamEnv<'tcx> for LayoutCx<'tcx, C> {
1545 fn param_env(&self) -> ty::ParamEnv<'tcx> {
// `LayoutCx` forwards data-layout queries to its wrapped context.
1550 impl<'tcx, T: HasDataLayout> HasDataLayout for LayoutCx<'tcx, T> {
1551 fn data_layout(&self) -> &TargetDataLayout {
1552 self.tcx.data_layout()
// `LayoutCx` forwards `TyCtxt` access to its wrapped context.
// (Method body elided in this extract.)
1556 impl<'gcx, 'tcx, T: HasTyCtxt<'gcx>> HasTyCtxt<'gcx> for LayoutCx<'tcx, T> {
1557 fn tcx<'b>(&'b self) -> TyCtxt<'b, 'gcx, 'gcx> {
// Abstraction letting layout methods be generic over infallible (`T`)
// and fallible (`Result<T, E>`) return types. (An associated `Error`
// type declaration is not visible in this extract.)
1562 pub trait MaybeResult<T> {
1565 fn from(x: Result<T, Self::Error>) -> Self;
1566 fn to_result(self) -> Result<T, Self::Error>;
// Infallible case: a bare `T` is its own "maybe result".
// (Method bodies elided in this extract; presumably unwraps the
// never-failing `Result` and re-wraps in `Ok`.)
1569 impl<T> MaybeResult<T> for T {
1572 fn from(x: Result<T, Self::Error>) -> Self {
1576 fn to_result(self) -> Result<T, Self::Error> {
// Fallible case: `Result<T, E>` passes through unchanged in both directions.
// (Method bodies elided in this extract.)
1581 impl<T, E> MaybeResult<T> for Result<T, E> {
1584 fn from(x: Result<T, Self::Error>) -> Self {
1587 fn to_result(self) -> Result<T, Self::Error> {
// Crate-local alias specializing the target-agnostic `TyLayout` to rustc's `Ty`.
1592 pub type TyLayout<'tcx> = ::rustc_target::abi::TyLayout<'tcx, Ty<'tcx>>;
// Primary `LayoutOf` implementation: normalize the type under a
// reveal-all param-env, run the `layout_raw` query, and wrap the interned
// details in a `TyLayout`. NOTE(review): some interior lines (the `if`
// guard and struct fields at 1604+) are elided in this extract.
1594 impl<'a, 'tcx> LayoutOf for LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> {
1596 type TyLayout = Result<TyLayout<'tcx>, LayoutError<'tcx>>;
1598 /// Computes the layout of a type. Note that this implicitly
1599 /// executes in "reveal all" mode.
1600 fn layout_of(&self, ty: Ty<'tcx>) -> Self::TyLayout {
1601 let param_env = self.param_env.with_reveal_all();
1602 let ty = self.tcx.normalize_erasing_regions(param_env, ty);
1603 let details = self.tcx.layout_raw(param_env.and(ty))?;
1604 let layout = TyLayout {
1609 // N.B., this recording is normally disabled; when enabled, it
1610 // can however trigger recursive invocations of `layout_of`.
1611 // Therefore, we execute it *after* the main query has
1612 // completed, to avoid problems around recursive structures
1613 // and the like. (Admittedly, I wasn't able to reproduce a problem
1614 // here, but it seems like the right thing to do. -nmatsakis)
1615 self.record_layout_for_printing(layout);
// Same as the `TyCtxt`-based impl above, but for a span-carrying
// `TyCtxtAt` context; it rebuilds a plain `LayoutCx` (visible at 1644)
// before delegating the `-Zprint-type-sizes` recording.
1621 impl<'a, 'tcx> LayoutOf for LayoutCx<'tcx, ty::query::TyCtxtAt<'a, 'tcx, 'tcx>> {
1623 type TyLayout = Result<TyLayout<'tcx>, LayoutError<'tcx>>;
1625 /// Computes the layout of a type. Note that this implicitly
1626 /// executes in "reveal all" mode.
1627 fn layout_of(&self, ty: Ty<'tcx>) -> Self::TyLayout {
1628 let param_env = self.param_env.with_reveal_all();
1629 let ty = self.tcx.normalize_erasing_regions(param_env, ty);
1630 let details = self.tcx.layout_raw(param_env.and(ty))?;
1631 let layout = TyLayout {
1636 // N.B., this recording is normally disabled; when enabled, it
1637 // can however trigger recursive invocations of `layout_of`.
1638 // Therefore, we execute it *after* the main query has
1639 // completed, to avoid problems around recursive structures
1640 // and the like. (Admittedly, I wasn't able to reproduce a problem
1641 // here, but it seems like the right thing to do. -nmatsakis)
1644 param_env: self.param_env
1646 cx.record_layout_for_printing(layout);
1652 // Helper (inherent) `layout_of` methods to avoid pushing `LayoutCx` to users.
1653 impl TyCtxt<'a, 'tcx, '_> {
1654 /// Computes the layout of a type. Note that this implicitly
1655 /// executes in "reveal all" mode.
// Convenience wrapper: builds a throwaway `LayoutCx` over the global tcx
// and delegates to the `LayoutOf` impl above.
1657 pub fn layout_of(self, param_env_and_ty: ty::ParamEnvAnd<'tcx, Ty<'tcx>>)
1658 -> Result<TyLayout<'tcx>, LayoutError<'tcx>> {
1660 tcx: self.global_tcx(),
1661 param_env: param_env_and_ty.param_env
1663 cx.layout_of(param_env_and_ty.value)
// Span-carrying twin of `TyCtxt::layout_of`: keeps `self.span` on the
// rebuilt `TyCtxtAt` so layout errors can point at the requesting location.
1667 impl ty::query::TyCtxtAt<'a, 'tcx, '_> {
1668 /// Computes the layout of a type. Note that this implicitly
1669 /// executes in "reveal all" mode.
1671 pub fn layout_of(self, param_env_and_ty: ty::ParamEnvAnd<'tcx, Ty<'tcx>>)
1672 -> Result<TyLayout<'tcx>, LayoutError<'tcx>> {
1674 tcx: self.global_tcx().at(self.span),
1675 param_env: param_env_and_ty.param_env
1677 cx.layout_of(param_env_and_ty.value)
// `TyLayoutMethods` wires rustc's `Ty` into the target-agnostic layout
// machinery: per-variant layouts, per-field layouts, and pointee metadata.
// NOTE(review): interior lines appear elided throughout this extract.
1681 impl<'a, 'tcx, C> TyLayoutMethods<'tcx, C> for Ty<'tcx>
1682 where C: LayoutOf<Ty = Ty<'tcx>> + HasTyCtxt<'tcx>,
1683 C::TyLayout: MaybeResult<TyLayout<'tcx>>,
1684 C: HasParamEnv<'tcx>
// Restricts a multi-variant layout to a single variant. For an absent
// (uninhabited) variant of a `Variants::Single` enum, synthesizes an
// uninhabited union-like layout instead of failing.
1686 fn for_variant(this: TyLayout<'tcx>, cx: &C, variant_index: VariantIdx) -> TyLayout<'tcx> {
1687 let details = match this.variants {
1688 Variants::Single { index } if index == variant_index => this.details,
1690 Variants::Single { index } => {
1691 // Deny calling for_variant more than once for non-Single enums.
1692 if let Ok(layout) = cx.layout_of(this.ty).to_result() {
1693 assert_eq!(layout.variants, Variants::Single { index });
1696 let fields = match this.ty.sty {
1697 ty::Adt(def, _) => def.variants[variant_index].fields.len(),
1701 tcx.intern_layout(LayoutDetails {
1702 variants: Variants::Single { index: variant_index },
1703 fields: FieldPlacement::Union(fields),
1704 abi: Abi::Uninhabited,
1705 align: tcx.data_layout.i8_align,
1710 Variants::Multiple { ref variants, .. } => {
1711 &variants[variant_index]
1715 assert_eq!(details.variants, Variants::Single { index: variant_index });
// Computes the layout of field `i` of `this`. The big match determines the
// field's *type*; `cx.layout_of` then produces its layout.
1723 fn field(this: TyLayout<'tcx>, cx: &C, i: usize) -> C::TyLayout {
// Helper for the synthetic discriminant "field" of multi-variant layouts.
1725 let discr_layout = |discr: &Scalar| -> C::TyLayout {
1726 let layout = LayoutDetails::scalar(cx, discr.clone());
1727 MaybeResult::from(Ok(TyLayout {
1728 details: tcx.intern_layout(layout),
1729 ty: discr.value.to_ty(tcx),
1733 cx.layout_of(match this.ty.sty {
// Scalar-like and trait-object types have no addressable fields.
1742 ty::GeneratorWitness(..) |
1744 ty::Dynamic(..) => {
1745 bug!("TyLayout::field_type({:?}): not applicable", this)
1748 // Potentially-fat pointers.
1749 ty::Ref(_, pointee, _) |
1750 ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
1751 assert!(i < this.fields.count());
1753 // Reuse the fat *T type as its own thin pointer data field.
1754 // This provides information about e.g., DST struct pointees
1755 // (which may have no non-DST form), and will work as long
1756 // as the `Abi` or `FieldPlacement` is checked by users.
1758 let nil = tcx.mk_unit();
1759 let ptr_ty = if this.ty.is_unsafe_ptr() {
1762 tcx.mk_mut_ref(tcx.lifetimes.re_static, nil)
1764 return MaybeResult::from(cx.layout_of(ptr_ty).to_result().map(|mut ptr_layout| {
1765 ptr_layout.ty = this.ty;
// Metadata field of a fat pointer: usize length for slices/str, a
// placeholder 3-usize array standing in for the vtable for dyn Trait.
1770 match tcx.struct_tail(pointee).sty {
1772 ty::Str => tcx.types.usize,
1773 ty::Dynamic(_, _) => {
1775 tcx.lifetimes.re_static,
1776 tcx.mk_array(tcx.types.usize, 3),
1778 /* FIXME: use actual fn pointers
1779 Warning: naively computing the number of entries in the
1780 vtable by counting the methods on the trait + methods on
1781 all parent traits does not work, because some methods can
1782 be not object safe and thus excluded from the vtable.
1783 Increase this counter if you tried to implement this but
1784 failed to do it without duplicating a lot of code from
1785 other places in the compiler: 2
1787 tcx.mk_array(tcx.types.usize, 3),
1788 tcx.mk_array(Option<fn()>),
1792 _ => bug!("TyLayout::field_type({:?}): not applicable", this)
1796 // Arrays and slices.
1797 ty::Array(element, _) |
1798 ty::Slice(element) => element,
1799 ty::Str => tcx.types.u8,
1801 // Tuples, generators and closures.
1802 ty::Closure(def_id, ref substs) => {
1803 substs.upvar_tys(def_id, tcx).nth(i).unwrap()
1806 ty::Generator(def_id, ref substs, _) => {
1807 match this.variants {
1808 Variants::Single { index } => {
1809 substs.state_tys(def_id, tcx)
1810 .nth(index.as_usize()).unwrap()
1813 Variants::Multiple { ref discr, discr_index, .. } => {
1814 if i == discr_index {
1815 return discr_layout(discr);
1817 substs.prefix_tys(def_id, tcx).nth(i).unwrap()
1822 ty::Tuple(tys) => tys[i].expect_ty(),
1824 // SIMD vector types.
1825 ty::Adt(def, ..) if def.repr.simd() => {
1826 this.ty.simd_type(tcx)
1830 ty::Adt(def, substs) => {
1831 match this.variants {
1832 Variants::Single { index } => {
1833 def.variants[index].fields[i].ty(tcx, substs)
1836 // Discriminant field for enums (where applicable).
1837 Variants::Multiple { ref discr, .. } => {
1839 return discr_layout(discr);
1844 ty::Projection(_) | ty::UnnormalizedProjection(..) | ty::Bound(..) |
1845 ty::Placeholder(..) | ty::Opaque(..) | ty::Param(_) | ty::Infer(_) |
1847 bug!("TyLayout::field_type: unexpected type `{}`", this.ty)
// NOTE(review): the `fn pointee_info_at` header line (1852) appears to be
// missing from this extract; 1853+ are its parameters and body. It reports
// size/align/aliasing info for a pointer stored at `offset` inside `this`,
// used for LLVM attributes like `noalias`/`dereferenceable`.
1853 this: TyLayout<'tcx>,
1856 ) -> Option<PointeeInfo> {
1858 ty::RawPtr(mt) if offset.bytes() == 0 => {
1859 cx.layout_of(mt.ty).to_result().ok()
1860 .map(|layout| PointeeInfo {
1862 align: layout.align.abi,
1867 ty::Ref(_, ty, mt) if offset.bytes() == 0 => {
1869 let is_freeze = ty.is_freeze(tcx, cx.param_env(), DUMMY_SP);
1870 let kind = match mt {
1871 hir::MutImmutable => if is_freeze {
1876 hir::MutMutable => {
1877 // Previously we would only emit noalias annotations for LLVM >= 6 or in
1878 // panic=abort mode. That was deemed right, as prior versions had many bugs
1879 // in conjunction with unwinding, but later versions didn’t seem to have
1880 // said issues. See issue #31681.
1882 // Alas, later on we encountered a case where noalias would generate wrong
1883 // code altogether even with recent versions of LLVM in *safe* code with no
1884 // unwinding involved. See #54462.
1886 // For now, do not enable mutable_noalias by default at all, while the
1887 // issue is being figured out.
1888 let mutable_noalias = tcx.sess.opts.debugging_opts.mutable_noalias
1890 if mutable_noalias {
1891 PointerKind::UniqueBorrowed
1898 cx.layout_of(ty).to_result().ok()
1899 .map(|layout| PointeeInfo {
1901 align: layout.align.abi,
// Non-pointer types: recurse into the field overlapping `offset`, but only
// through the dataful variant of a niche-optimized enum (the niche scalar's
// validity range already encodes the null case).
1907 let mut data_variant = match this.variants {
1908 // Within the discriminant field, only the niche itself is
1909 // always initialized, so we only check for a pointer at its
1912 // If the niche is a pointer, it's either valid (according
1913 // to its type), or null (which the niche field's scalar
1914 // validity range encodes). This allows using
1915 // `dereferenceable_or_null` for e.g., `Option<&T>`, and
1916 // this will continue to work as long as we don't start
1917 // using more niches than just null (e.g., the first page of
1918 // the address space, or unaligned pointers).
1919 Variants::Multiple {
1920 discr_kind: DiscriminantKind::Niche {
1926 } if this.fields.offset(discr_index) == offset =>
1927 Some(this.for_variant(cx, dataful_variant)),
1931 if let Some(variant) = data_variant {
1932 // We're not interested in any unions.
1933 if let FieldPlacement::Union(_) = variant.fields {
1934 data_variant = None;
1938 let mut result = None;
1940 if let Some(variant) = data_variant {
1941 let ptr_end = offset + Pointer.size(cx);
1942 for i in 0..variant.fields.count() {
1943 let field_start = variant.fields.offset(i);
1944 if field_start <= offset {
1945 let field = variant.field(cx, i);
1946 result = field.to_result().ok()
1948 if ptr_end <= field_start + field.size {
1949 // We found the right field, look inside it.
1950 field.pointee_info_at(cx, offset - field_start)
1955 if result.is_some() {
1962 // FIXME(eddyb) This should be for `ptr::Unique<T>`, not `Box<T>`.
1963 if let Some(ref mut pointee) = result {
1964 if let ty::Adt(def, _) = this.ty.sty {
1965 if def.is_box() && offset.bytes() == 0 {
1966 pointee.safe = Some(PointerKind::UniqueOwned);
// Reserves `count` values out of this niche's spare (invalid) range.
// Returns `None` if fewer than `count` values are available; otherwise
// returns the first reserved value and a widened `Scalar` whose valid
// range now covers the reserved values (arithmetic wraps modulo the
// scalar's bit width). NOTE(review): the enclosing impl header is not
// visible in this extract — this reads `self.available`/`self.scalar`,
// so it is presumably a method on `Niche`; confirm against the full file.
1984 fn reserve<'a, 'tcx>(
1986 cx: &LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>>,
1988 ) -> Option<(u128, Scalar)> {
1989 if count > self.available {
1992 let Scalar { value, valid_range: ref v } = self.scalar;
1993 let bits = value.size(cx).bits();
1994 assert!(bits <= 128);
1995 let max_value = !0u128 >> (128 - bits);
1996 let start = v.end().wrapping_add(1) & max_value;
1997 let end = v.end().wrapping_add(count) & max_value;
1998 Some((start, Scalar { value, valid_range: *v.start()..=end }))
2002 impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> {
2003 /// Find the offset of a niche leaf field, starting from
2004 /// the given type and recursing through aggregates.
2005 // FIXME(eddyb) traverse already optimized enums.
2006 fn find_niche(&self, layout: TyLayout<'tcx>) -> Result<Option<Niche>, LayoutError<'tcx>> {
// Builds a `Niche` from a scalar at `offset` if its validity range leaves
// any spare values; the range is a wrapping range, hence the two cases.
2007 let scalar_niche = |scalar: &Scalar, offset| {
2008 let Scalar { value, valid_range: ref v } = *scalar;
2010 let bits = value.size(self).bits();
2011 assert!(bits <= 128);
2012 let max_value = !0u128 >> (128 - bits);
2014 // Find out how many values are outside the valid range.
2015 let available = if v.start() <= v.end() {
2016 v.start() + (max_value - v.end())
2018 v.start() - v.end() - 1
2021 // Give up if there is no niche value available.
2026 Some(Niche { offset, scalar: scalar.clone(), available })
2029 // Locals variables which live across yields are stored
2030 // in the generator type as fields. These may be uninitialized
2031 // so we don't look for niches there.
2032 if let ty::Generator(..) = layout.ty.sty {
// Direct hits: the layout's own ABI already exposes a scalar to niche.
2037 Abi::Scalar(ref scalar) => {
2038 return Ok(scalar_niche(scalar, Size::ZERO));
2040 Abi::ScalarPair(ref a, ref b) => {
2041 // HACK(nox): We iter on `b` and then `a` because `max_by_key`
2042 // returns the last maximum.
2043 let niche = iter::once(
2044 (b, a.value.size(self).align_to(b.value.align(self).abi))
2046 .chain(iter::once((a, Size::ZERO)))
2047 .filter_map(|(scalar, offset)| scalar_niche(scalar, offset))
2048 .max_by_key(|niche| niche.available);
2051 Abi::Vector { ref element, .. } => {
2052 return Ok(scalar_niche(element, Size::ZERO));
2057 // Perhaps one of the fields is non-zero, let's recurse and find out.
2058 if let FieldPlacement::Union(_) = layout.fields {
2059 // Only Rust enums have safe-to-inspect fields
2060 // (a discriminant), other unions are unsafe.
2061 if let Variants::Single { .. } = layout.variants {
2065 if let FieldPlacement::Array { count: original_64_bit_count, .. } = layout.fields {
2066 // rust-lang/rust#57038: avoid ICE within FieldPlacement::count when count too big
2067 if original_64_bit_count > usize::max_value() as u64 {
2068 return Err(LayoutError::SizeOverflow(layout.ty));
// All array elements share a layout, so inspecting element 0 suffices.
2070 if layout.fields.count() > 0 {
2071 return self.find_niche(layout.field(self, 0)?);
// General aggregate: recurse into every field and keep the niche with the
// most available values, adjusting its offset to be relative to `layout`.
2076 let mut niche = None;
2077 let mut available = 0;
2078 for i in 0..layout.fields.count() {
2079 if let Some(mut c) = self.find_niche(layout.field(self, i)?)? {
2080 if c.available > available {
2081 available = c.available;
2082 c.offset += layout.fields.offset(i);
// Stable-hash `Variants`: discriminant first, then each variant's payload
// fields (Single's index; Multiple's discr/kind/index/variants).
2091 impl<'a> HashStable<StableHashingContext<'a>> for Variants {
2092 fn hash_stable<W: StableHasherResult>(&self,
2093 hcx: &mut StableHashingContext<'a>,
2094 hasher: &mut StableHasher<W>) {
2095 use crate::ty::layout::Variants::*;
2096 mem::discriminant(self).hash_stable(hcx, hasher);
2099 Single { index } => {
2100 index.hash_stable(hcx, hasher);
2108 discr.hash_stable(hcx, hasher);
2109 discr_kind.hash_stable(hcx, hasher);
2110 discr_index.hash_stable(hcx, hasher);
2111 variants.hash_stable(hcx, hasher);
// Stable-hash `DiscriminantKind`: discriminant, then (for the Niche
// variant) the dataful variant, the niche variant range, and the start value.
2117 impl<'a> HashStable<StableHashingContext<'a>> for DiscriminantKind {
2118 fn hash_stable<W: StableHasherResult>(&self,
2119 hcx: &mut StableHashingContext<'a>,
2120 hasher: &mut StableHasher<W>) {
2121 use crate::ty::layout::DiscriminantKind::*;
2122 mem::discriminant(self).hash_stable(hcx, hasher);
2131 dataful_variant.hash_stable(hcx, hasher);
2132 niche_variants.start().hash_stable(hcx, hasher);
2133 niche_variants.end().hash_stable(hcx, hasher);
2134 niche_start.hash_stable(hcx, hasher);
// Stable-hash `FieldPlacement`: discriminant, then the per-variant payload
// (Union's count; Array's count+stride; Arbitrary's offsets+memory_index).
2140 impl<'a> HashStable<StableHashingContext<'a>> for FieldPlacement {
2141 fn hash_stable<W: StableHasherResult>(&self,
2142 hcx: &mut StableHashingContext<'a>,
2143 hasher: &mut StableHasher<W>) {
2144 use crate::ty::layout::FieldPlacement::*;
2145 mem::discriminant(self).hash_stable(hcx, hasher);
2149 count.hash_stable(hcx, hasher);
2151 Array { count, stride } => {
2152 count.hash_stable(hcx, hasher);
2153 stride.hash_stable(hcx, hasher);
2155 Arbitrary { ref offsets, ref memory_index } => {
2156 offsets.hash_stable(hcx, hasher);
2157 memory_index.hash_stable(hcx, hasher);
// Stable-hash a `VariantIdx` via its raw u32 value.
2163 impl<'a> HashStable<StableHashingContext<'a>> for VariantIdx {
2164 fn hash_stable<W: StableHasherResult>(
2166 hcx: &mut StableHashingContext<'a>,
2167 hasher: &mut StableHasher<W>,
2169 self.as_u32().hash_stable(hcx, hasher)
// Stable-hash `Abi`: discriminant, then each variant's payload fields.
2173 impl<'a> HashStable<StableHashingContext<'a>> for Abi {
2174 fn hash_stable<W: StableHasherResult>(&self,
2175 hcx: &mut StableHashingContext<'a>,
2176 hasher: &mut StableHasher<W>) {
2177 use crate::ty::layout::Abi::*;
2178 mem::discriminant(self).hash_stable(hcx, hasher);
2182 Scalar(ref value) => {
2183 value.hash_stable(hcx, hasher);
2185 ScalarPair(ref a, ref b) => {
2186 a.hash_stable(hcx, hasher);
2187 b.hash_stable(hcx, hasher);
2189 Vector { ref element, count } => {
2190 element.hash_stable(hcx, hasher);
2191 count.hash_stable(hcx, hasher);
2193 Aggregate { sized } => {
2194 sized.hash_stable(hcx, hasher);
// Stable-hash `Scalar`: its primitive value plus both endpoints of the
// (wrapping, inclusive) validity range.
2200 impl<'a> HashStable<StableHashingContext<'a>> for Scalar {
2201 fn hash_stable<W: StableHasherResult>(&self,
2202 hcx: &mut StableHashingContext<'a>,
2203 hasher: &mut StableHasher<W>) {
2204 let Scalar { value, ref valid_range } = *self;
2205 value.hash_stable(hcx, hasher);
2206 valid_range.start().hash_stable(hcx, hasher);
2207 valid_range.end().hash_stable(hcx, hasher);
// Derived-style stable-hash impls for the remaining layout types via the
// `impl_stable_hash_for!` macro. (Field lists are partially elided in this
// extract.)
2211 impl_stable_hash_for!(struct crate::ty::layout::LayoutDetails {
2219 impl_stable_hash_for!(enum crate::ty::layout::Integer {
2227 impl_stable_hash_for!(enum crate::ty::layout::Primitive {
2228 Int(integer, signed),
2233 impl_stable_hash_for!(struct crate::ty::layout::AbiAndPrefAlign {
// Stable-hash `Align` by its byte value.
2238 impl<'gcx> HashStable<StableHashingContext<'gcx>> for Align {
2239 fn hash_stable<W: StableHasherResult>(&self,
2240 hcx: &mut StableHashingContext<'gcx>,
2241 hasher: &mut StableHasher<W>) {
2242 self.bytes().hash_stable(hcx, hasher);
// Stable-hash `Size` by its byte value.
2246 impl<'gcx> HashStable<StableHashingContext<'gcx>> for Size {
2247 fn hash_stable<W: StableHasherResult>(&self,
2248 hcx: &mut StableHashingContext<'gcx>,
2249 hasher: &mut StableHasher<W>) {
2250 self.bytes().hash_stable(hcx, hasher);
// Stable-hash `LayoutError`: discriminant, then the offending type for the
// visible `SizeOverflow` arm (other arms elided in this extract).
2254 impl<'a, 'gcx> HashStable<StableHashingContext<'a>> for LayoutError<'gcx>
2256 fn hash_stable<W: StableHasherResult>(&self,
2257 hcx: &mut StableHashingContext<'a>,
2258 hasher: &mut StableHasher<W>) {
2259 use crate::ty::layout::LayoutError::*;
2260 mem::discriminant(self).hash_stable(hcx, hasher);
2264 SizeOverflow(t) => t.hash_stable(hcx, hasher)
// Extension trait computing call ABIs (`FnType`) for rustc signatures:
// constructors from an instance or a raw `FnSig`, a vtable-call variant,
// the shared `new_internal` worker (its signature lines 2281/2285 are
// partially elided here), and per-target ABI adjustment.
2269 pub trait FnTypeExt<'tcx, C>
2271 C: LayoutOf<Ty = Ty<'tcx>, TyLayout = TyLayout<'tcx>>
2275 + HasParamEnv<'tcx>,
2277 fn of_instance(cx: &C, instance: &ty::Instance<'tcx>) -> Self;
2278 fn new(cx: &C, sig: ty::FnSig<'tcx>, extra_args: &[Ty<'tcx>]) -> Self;
2279 fn new_vtable(cx: &C, sig: ty::FnSig<'tcx>, extra_args: &[Ty<'tcx>]) -> Self;
2282 sig: ty::FnSig<'tcx>,
2283 extra_args: &[Ty<'tcx>],
2284 mk_arg_type: impl Fn(Ty<'tcx>, Option<usize>) -> ArgType<'tcx, Ty<'tcx>>,
2286 fn adjust_for_abi(&mut self, cx: &C, abi: SpecAbi);
2289 impl<'tcx, C> FnTypeExt<'tcx, C> for call::FnType<'tcx, Ty<'tcx>>
2291 C: LayoutOf<Ty = Ty<'tcx>, TyLayout = TyLayout<'tcx>>
2295 + HasParamEnv<'tcx>,
2297 fn of_instance(cx: &C, instance: &ty::Instance<'tcx>) -> Self {
2298 let sig = instance.fn_sig(cx.tcx());
2301 .normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), &sig);
2302 call::FnType::new(cx, sig, &[])
2305 fn new(cx: &C, sig: ty::FnSig<'tcx>, extra_args: &[Ty<'tcx>]) -> Self {
2306 call::FnType::new_internal(cx, sig, extra_args, |ty, _| ArgType::new(cx.layout_of(ty)))
2309 fn new_vtable(cx: &C, sig: ty::FnSig<'tcx>, extra_args: &[Ty<'tcx>]) -> Self {
2310 FnTypeExt::new_internal(cx, sig, extra_args, |ty, arg_idx| {
2311 let mut layout = cx.layout_of(ty);
2312 // Don't pass the vtable, it's not an argument of the virtual fn.
2313 // Instead, pass just the data pointer, but give it the type `*const/mut dyn Trait`
2314 // or `&/&mut dyn Trait` because this is special-cased elsewhere in codegen
2315 if arg_idx == Some(0) {
2316 let fat_pointer_ty = if layout.is_unsized() {
2317 // unsized `self` is passed as a pointer to `self`
2318 // FIXME (mikeyhew) change this to use &own if it is ever added to the language
2319 cx.tcx().mk_mut_ptr(layout.ty)
2322 Abi::ScalarPair(..) => (),
2323 _ => bug!("receiver type has unsupported layout: {:?}", layout),
2326 // In the case of Rc<Self>, we need to explicitly pass a *mut RcBox<Self>
2327 // with a Scalar (not ScalarPair) ABI. This is a hack that is understood
2328 // elsewhere in the compiler as a method on a `dyn Trait`.
2329 // To get the type `*mut RcBox<Self>`, we just keep unwrapping newtypes until we
2330 // get a built-in pointer type
2331 let mut fat_pointer_layout = layout;
2332 'descend_newtypes: while !fat_pointer_layout.ty.is_unsafe_ptr()
2333 && !fat_pointer_layout.ty.is_region_ptr()
2335 'iter_fields: for i in 0..fat_pointer_layout.fields.count() {
2336 let field_layout = fat_pointer_layout.field(cx, i);
2338 if !field_layout.is_zst() {
2339 fat_pointer_layout = field_layout;
2340 continue 'descend_newtypes;
2345 "receiver has no non-zero-sized fields {:?}",
2350 fat_pointer_layout.ty
2353 // we now have a type like `*mut RcBox<dyn Trait>`
2354 // change its layout to that of `*mut ()`, a thin pointer, but keep the same type
2355 // this is understood as a special case elsewhere in the compiler
2356 let unit_pointer_ty = cx.tcx().mk_mut_ptr(cx.tcx().mk_unit());
2357 layout = cx.layout_of(unit_pointer_ty);
2358 layout.ty = fat_pointer_ty;
2360 ArgType::new(layout)
2366 sig: ty::FnSig<'tcx>,
2367 extra_args: &[Ty<'tcx>],
2368 mk_arg_type: impl Fn(Ty<'tcx>, Option<usize>) -> ArgType<'tcx, Ty<'tcx>>,
2370 debug!("FnType::new_internal({:?}, {:?})", sig, extra_args);
2372 use rustc_target::spec::abi::Abi::*;
2373 let conv = match cx.tcx().sess.target.target.adjust_abi(sig.abi) {
2374 RustIntrinsic | PlatformIntrinsic | Rust | RustCall => Conv::C,
2376 // It's the ABI's job to select this, not ours.
2377 System => bug!("system abi should be selected elsewhere"),
2379 Stdcall => Conv::X86Stdcall,
2380 Fastcall => Conv::X86Fastcall,
2381 Vectorcall => Conv::X86VectorCall,
2382 Thiscall => Conv::X86ThisCall,
2384 Unadjusted => Conv::C,
2385 Win64 => Conv::X86_64Win64,
2386 SysV64 => Conv::X86_64SysV,
2387 Aapcs => Conv::ArmAapcs,
2388 PtxKernel => Conv::PtxKernel,
2389 Msp430Interrupt => Conv::Msp430Intr,
2390 X86Interrupt => Conv::X86Intr,
2391 AmdGpuKernel => Conv::AmdGpuKernel,
2393 // These API constants ought to be more specific...
2397 let mut inputs = sig.inputs();
2398 let extra_args = if sig.abi == RustCall {
2399 assert!(!sig.c_variadic && extra_args.is_empty());
2401 match sig.inputs().last().unwrap().sty {
2402 ty::Tuple(tupled_arguments) => {
2403 inputs = &sig.inputs()[0..sig.inputs().len() - 1];
2404 tupled_arguments.iter().map(|k| k.expect_ty()).collect()
2408 "argument to function with \"rust-call\" ABI \
2414 assert!(sig.c_variadic || extra_args.is_empty());
2418 let target = &cx.tcx().sess.target.target;
2420 target.target_os == "windows" && target.arch == "x86_64" && target.target_env == "gnu";
2422 target.target_os == "linux" && target.arch == "s390x" && target.target_env == "gnu";
2424 target.target_os == "linux" && target.arch == "sparc64" && target.target_env == "gnu";
2425 let rust_abi = match sig.abi {
2426 RustIntrinsic | PlatformIntrinsic | Rust | RustCall => true,
2430 // Handle safe Rust thin and fat pointers.
2431 let adjust_for_rust_scalar = |attrs: &mut ArgAttributes,
2433 layout: TyLayout<'tcx>,
2436 // Booleans are always an i1 that needs to be zero-extended.
2437 if scalar.is_bool() {
2438 attrs.set(ArgAttribute::ZExt);
2442 // Only pointer types handled below.
2443 if scalar.value != Pointer {
2447 if scalar.valid_range.start() < scalar.valid_range.end() {
2448 if *scalar.valid_range.start() > 0 {
2449 attrs.set(ArgAttribute::NonNull);
2453 if let Some(pointee) = layout.pointee_info_at(cx, offset) {
2454 if let Some(kind) = pointee.safe {
2455 attrs.pointee_size = pointee.size;
2456 attrs.pointee_align = Some(pointee.align);
2458 // `Box` pointer parameters never alias because ownership is transferred
2459 // `&mut` pointer parameters never alias other parameters,
2460 // or mutable global data
2462 // `&T` where `T` contains no `UnsafeCell<U>` is immutable,
2463 // and can be marked as both `readonly` and `noalias`, as
2464 // LLVM's definition of `noalias` is based solely on memory
2465 // dependencies rather than pointer equality
2466 let no_alias = match kind {
2467 PointerKind::Shared => false,
2468 PointerKind::UniqueOwned => true,
2469 PointerKind::Frozen | PointerKind::UniqueBorrowed => !is_return,
2472 attrs.set(ArgAttribute::NoAlias);
2475 if kind == PointerKind::Frozen && !is_return {
2476 attrs.set(ArgAttribute::ReadOnly);
2482 // Store the index of the last argument. This is useful for working with
2483 // C-compatible variadic arguments.
2484 let last_arg_idx = if sig.inputs().is_empty() {
2487 Some(sig.inputs().len() - 1)
2490 let arg_of = |ty: Ty<'tcx>, arg_idx: Option<usize>| {
2491 let is_return = arg_idx.is_none();
2492 let mut arg = mk_arg_type(ty, arg_idx);
2493 if arg.layout.is_zst() {
2494 // For some forsaken reason, x86_64-pc-windows-gnu
2495 // doesn't ignore zero-sized struct arguments.
2496 // The same is true for s390x-unknown-linux-gnu
2497 // and sparc64-unknown-linux-gnu.
2498 if is_return || rust_abi || (!win_x64_gnu && !linux_s390x && !linux_sparc64) {
2499 arg.mode = PassMode::Ignore(IgnoreMode::Zst);
2503 // If this is a C-variadic function, this is not the return value,
2504 // and there is one or more fixed arguments; ensure that the `VaList`
2505 // is ignored as an argument.
2507 match (last_arg_idx, arg_idx) {
2508 (Some(last_idx), Some(cur_idx)) if last_idx == cur_idx => {
2509 let va_list_did = match cx.tcx().lang_items().va_list() {
2511 None => bug!("`va_list` lang item required for C-variadic functions"),
2514 ty::Adt(def, _) if def.did == va_list_did => {
2515 // This is the "spoofed" `VaList`. Set the argument's mode
2516 // so that it will be ignored.
2517 arg.mode = PassMode::Ignore(IgnoreMode::CVarArgs);
2526 // FIXME(eddyb) other ABIs don't have logic for scalar pairs.
2527 if !is_return && rust_abi {
2528 if let Abi::ScalarPair(ref a, ref b) = arg.layout.abi {
2529 let mut a_attrs = ArgAttributes::new();
2530 let mut b_attrs = ArgAttributes::new();
2531 adjust_for_rust_scalar(&mut a_attrs, a, arg.layout, Size::ZERO, false);
2532 adjust_for_rust_scalar(
2536 a.value.size(cx).align_to(b.value.align(cx).abi),
2539 arg.mode = PassMode::Pair(a_attrs, b_attrs);
2544 if let Abi::Scalar(ref scalar) = arg.layout.abi {
2545 if let PassMode::Direct(ref mut attrs) = arg.mode {
2546 adjust_for_rust_scalar(attrs, scalar, arg.layout, Size::ZERO, is_return);
2553 let mut fn_ty = FnType {
2554 ret: arg_of(sig.output(), None),
2560 .map(|(i, ty)| arg_of(ty, Some(i)))
2562 c_variadic: sig.c_variadic,
2565 fn_ty.adjust_for_abi(cx, sig.abi);
2569 fn adjust_for_abi(&mut self, cx: &C, abi: SpecAbi) {
2570 if abi == SpecAbi::Unadjusted {
2574 if abi == SpecAbi::Rust
2575 || abi == SpecAbi::RustCall
2576 || abi == SpecAbi::RustIntrinsic
2577 || abi == SpecAbi::PlatformIntrinsic
2579 let fixup = |arg: &mut ArgType<'tcx, Ty<'tcx>>| {
2580 if arg.is_ignore() {
2584 match arg.layout.abi {
2585 Abi::Aggregate { .. } => {}
2587 // This is a fun case! The gist of what this is doing is
2588 // that we want callers and callees to always agree on the
2589 // ABI of how they pass SIMD arguments. If we were to *not*
2590 // make these arguments indirect then they'd be immediates
2591 // in LLVM, which means that they'd use whatever the
2592 // appropriate ABI is for the callee and the caller. That
2593 // means, for example, if the caller doesn't have AVX
2594 // enabled but the callee does, then passing an AVX argument
2595 // across this boundary would cause corrupt data to show up.
2597 // This problem is fixed by unconditionally passing SIMD
2598 // arguments through memory between callers and callees
2599 // which should get them all to agree on ABI regardless of
2600 // target feature sets. Some more information about this
2601 // issue can be found in #44367.
2603 // Note that the platform intrinsic ABI is exempt here as
2604 // that's how we connect up to LLVM and it's unstable
2605 // anyway, we control all calls to it in libstd.
2607 if abi != SpecAbi::PlatformIntrinsic
2608 && cx.tcx().sess.target.target.options.simd_types_indirect =>
2610 arg.make_indirect();
2617 let size = arg.layout.size;
2618 if arg.layout.is_unsized() || size > Pointer.size(cx) {
2619 arg.make_indirect();
2621 // We want to pass small aggregates as immediates, but using
2622 // a LLVM aggregate type for this leads to bad optimizations,
2623 // so we pick an appropriately sized integer type instead.
2625 kind: RegKind::Integer,
2630 fixup(&mut self.ret);
2631 for arg in &mut self.args {
2634 if let PassMode::Indirect(ref mut attrs, _) = self.ret.mode {
2635 attrs.set(ArgAttribute::StructRet);
2640 if let Err(msg) = self.adjust_for_cabi(cx, abi) {
2641 cx.tcx().sess.fatal(&msg);