use crate::session::{self, DataTypeKind};
use crate::ty::{self, Ty, TyCtxt, TypeFoldable, ReprOptions};

use syntax::ast::{self, Ident, IntTy, UintTy};
use syntax::attr;
use syntax_pos::DUMMY_SP;

use std::{cmp, fmt, iter, mem};
use std::ops::Bound;

use crate::hir;
use crate::ich::StableHashingContext;
use rustc_data_structures::indexed_vec::{IndexVec, Idx};
use rustc_data_structures::stable_hasher::{HashStable, StableHasher,
                                           StableHasherResult};

pub use rustc_target::abi::*;
pub trait IntegerExt {
    fn to_ty<'a, 'tcx>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, signed: bool) -> Ty<'tcx>;
    fn from_attr<C: HasDataLayout>(cx: &C, ity: attr::IntType) -> Integer;
    fn repr_discr<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,

impl IntegerExt for Integer {
    fn to_ty<'a, 'tcx>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, signed: bool) -> Ty<'tcx> {
        match (*self, signed) {
            (I8, false) => tcx.types.u8,
            (I16, false) => tcx.types.u16,
            (I32, false) => tcx.types.u32,
            (I64, false) => tcx.types.u64,
            (I128, false) => tcx.types.u128,
            (I8, true) => tcx.types.i8,
            (I16, true) => tcx.types.i16,
            (I32, true) => tcx.types.i32,
            (I64, true) => tcx.types.i64,
            (I128, true) => tcx.types.i128,
        }
    }

    /// Gets the Integer type from an attr::IntType.
    fn from_attr<C: HasDataLayout>(cx: &C, ity: attr::IntType) -> Integer {
        let dl = cx.data_layout();

        match ity {
            attr::SignedInt(IntTy::I8) | attr::UnsignedInt(UintTy::U8) => I8,
            attr::SignedInt(IntTy::I16) | attr::UnsignedInt(UintTy::U16) => I16,
            attr::SignedInt(IntTy::I32) | attr::UnsignedInt(UintTy::U32) => I32,
            attr::SignedInt(IntTy::I64) | attr::UnsignedInt(UintTy::U64) => I64,
            attr::SignedInt(IntTy::I128) | attr::UnsignedInt(UintTy::U128) => I128,
            attr::SignedInt(IntTy::Isize) | attr::UnsignedInt(UintTy::Usize) => {
                dl.ptr_sized_integer()
            }
        }
    }

    /// Finds the appropriate Integer type and signedness for the given
    /// signed discriminant range and `#[repr]` attribute.
    /// N.B.: u128 values above i128::MAX will be treated as signed, but
    /// that shouldn't affect anything other than maybe debuginfo.
    fn repr_discr<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,

        // Theoretically, negative values could be larger in unsigned representation
        // than the unsigned representation of the signed minimum. However, if there
        // are any negative values, the only valid unsigned representation is u128,
        // which can fit all i128 values, so the result remains unaffected.
        let unsigned_fit = Integer::fit_unsigned(cmp::max(min as u128, max as u128));
        let signed_fit = cmp::max(Integer::fit_signed(min), Integer::fit_signed(max));
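        // Illustrative example (added; not part of the original source): for
        // min = -1 and max = 100, `min as u128` wraps to u128::MAX, so
        // `unsigned_fit` is I128, while `signed_fit` is
        // max(fit_signed(-1), fit_signed(100)) = I8; the signed fit is the
        // one used below whenever the range contains a negative value.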
        let mut min_from_extern = None;
        let min_default = I8;

        if let Some(ity) = repr.int {
            let discr = Integer::from_attr(&tcx, ity);
            let fit = if ity.is_signed() { signed_fit } else { unsigned_fit };
            if discr < fit {
                bug!("Integer::repr_discr: `#[repr]` hint too small for \
                      discriminant range of enum `{}`", ty)
            }
            return (discr, ity.is_signed());
        }

        if repr.c() {
            match &tcx.sess.target.target.arch[..] {
                // WARNING: the ARM EABI has two variants; the one corresponding
                // to `at_least == I32` appears to be used on Linux and NetBSD,
                // but some systems may use the variant corresponding to no
                // lower bound. However, we don't run on those yet...?
                "arm" => min_from_extern = Some(I32),
                _ => min_from_extern = Some(I32),
            }
        }

        let at_least = min_from_extern.unwrap_or(min_default);

        // If there are no negative values, we can use the unsigned fit.
        if min >= 0 {
            (cmp::max(unsigned_fit, at_least), false)
        } else {
            (cmp::max(signed_fit, at_least), true)
        }
    }
}
pub trait PrimitiveExt {
    fn to_ty<'a, 'tcx>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>) -> Ty<'tcx>;
}

impl PrimitiveExt for Primitive {
    fn to_ty<'a, 'tcx>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>) -> Ty<'tcx> {
        match *self {
            Int(i, signed) => i.to_ty(tcx, signed),
            Float(FloatTy::F32) => tcx.types.f32,
            Float(FloatTy::F64) => tcx.types.f64,
            Pointer => tcx.mk_mut_ptr(tcx.mk_unit()),
        }
    }
}

/// The first half of a fat pointer.
///
/// - For a trait object, this is the address of the box.
/// - For a slice, this is the base address.
pub const FAT_PTR_ADDR: usize = 0;

/// The second half of a fat pointer.
///
/// - For a trait object, this is the address of the vtable.
/// - For a slice, this is the length.
pub const FAT_PTR_EXTRA: usize = 1;
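// Illustrative note (added; not in the original): for a `&[u8]`, field
// `FAT_PTR_ADDR` is the `*const u8` data pointer and field `FAT_PTR_EXTRA`
// is the `usize` length; for a `&dyn Trait`, `FAT_PTR_EXTRA` is the vtable
// pointer instead.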
#[derive(Copy, Clone, Debug, RustcEncodable, RustcDecodable)]
pub enum LayoutError<'tcx> {
    Unknown(Ty<'tcx>),
    SizeOverflow(Ty<'tcx>)
}

impl<'tcx> fmt::Display for LayoutError<'tcx> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match *self {
            LayoutError::Unknown(ty) => {
                write!(f, "the type `{:?}` has an unknown layout", ty)
            }
            LayoutError::SizeOverflow(ty) => {
                write!(f, "the type `{:?}` is too big for the current architecture", ty)
            }
        }
    }
}
fn layout_raw<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
                        query: ty::ParamEnvAnd<'tcx, Ty<'tcx>>)
                        -> Result<&'tcx LayoutDetails, LayoutError<'tcx>>
{
    ty::tls::with_related_context(tcx, move |icx| {
        let rec_limit = *tcx.sess.recursion_limit.get();
        let (param_env, ty) = query.into_parts();

        if icx.layout_depth > rec_limit {
            tcx.sess.fatal(
                &format!("overflow representing the type `{}`", ty));
        }

        // Update the ImplicitCtxt to increase the layout_depth
        let icx = ty::tls::ImplicitCtxt {
            layout_depth: icx.layout_depth + 1,
            ..icx.clone()
        };

        ty::tls::enter_context(&icx, |_| {
            let cx = LayoutCx { tcx, param_env };
            let layout = cx.layout_raw_uncached(ty);
            // Type-level uninhabitedness should always imply ABI uninhabitedness.
            if let Ok(layout) = layout {
                if ty.conservative_is_privately_uninhabited(tcx) {
                    assert!(layout.abi.is_uninhabited());
                }
            }
            layout
        })
    })
}
pub fn provide(providers: &mut ty::query::Providers<'_>) {
    *providers = ty::query::Providers { layout_raw, ..*providers };
}

pub struct LayoutCx<'tcx, C> {
    pub tcx: C,
    pub param_env: ty::ParamEnv<'tcx>,
}

impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> {
    fn layout_raw_uncached(&self, ty: Ty<'tcx>) -> Result<&'tcx LayoutDetails, LayoutError<'tcx>> {
        let tcx = self.tcx;
        let param_env = self.param_env;
        let dl = self.data_layout();
        let scalar_unit = |value: Primitive| {
            let bits = value.size(dl).bits();
            assert!(bits <= 128);
            Scalar {
                value,
                valid_range: 0..=(!0 >> (128 - bits))
            }
        };
        let scalar = |value: Primitive| {
            tcx.intern_layout(LayoutDetails::scalar(self, scalar_unit(value)))
        };
        let scalar_pair = |a: Scalar, b: Scalar| {
            let b_align = b.value.align(dl);
            let align = a.value.align(dl).max(b_align).max(dl.aggregate_align);
            let b_offset = a.value.size(dl).align_to(b_align.abi);
            let size = (b_offset + b.value.size(dl)).align_to(align.abi);
            LayoutDetails {
                variants: Variants::Single { index: VariantIdx::new(0) },
                fields: FieldPlacement::Arbitrary {
                    offsets: vec![Size::ZERO, b_offset],
                    memory_index: vec![0, 1]
                },
                abi: Abi::ScalarPair(a, b),
                align,
                size
            }
        };
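        // Worked example for `scalar_pair` (added; illustrative only): on a
        // 64-bit target, pairing `a = I32` with `b = I64` gives
        // `b_offset = Size(4).align_to(8) = 8` and
        // `size = (8 + 8).align_to(8) = 16`, i.e. the pair is laid out as
        // { i32 at offset 0, i64 at offset 8 }.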
        #[derive(Copy, Clone, Debug)]
        enum StructKind {
            /// A tuple, closure, or univariant which cannot be coerced to unsized.
            AlwaysSized,
            /// A univariant, the last field of which may be coerced to unsized.
            MaybeUnsized,
            /// A univariant, but with a prefix of an arbitrary size & alignment (e.g., enum tag).
            Prefixed(Size, Align),
        }
        let univariant_uninterned = |fields: &[TyLayout<'_>], repr: &ReprOptions, kind| {
            let packed = repr.packed();
            if packed && repr.align > 0 {
                bug!("struct cannot be packed and aligned");
            }

            let pack = Align::from_bytes(repr.pack as u64).unwrap();

            let mut align = if packed {
                dl.i8_align
            } else {
                dl.aggregate_align
            };

            let mut sized = true;
            let mut offsets = vec![Size::ZERO; fields.len()];
            let mut inverse_memory_index: Vec<u32> = (0..fields.len() as u32).collect();

            let mut optimize = !repr.inhibit_struct_field_reordering_opt();
            if let StructKind::Prefixed(_, align) = kind {
                optimize &= align.bytes() == 1;
            }

            if optimize {
                let end = if let StructKind::MaybeUnsized = kind {
                    fields.len() - 1
                } else {
                    fields.len()
                };
                let optimizing = &mut inverse_memory_index[..end];
                let field_align = |f: &TyLayout<'_>| {
                    if packed { f.align.abi.min(pack) } else { f.align.abi }
                };
                match kind {
                    StructKind::AlwaysSized |
                    StructKind::MaybeUnsized => {
                        optimizing.sort_by_key(|&x| {
                            // Place ZSTs first to avoid "interesting offsets",
                            // especially with only one or two non-ZST fields.
                            let f = &fields[x as usize];
                            (!f.is_zst(), cmp::Reverse(field_align(f)))
                        });
                    }
                    StructKind::Prefixed(..) => {
                        optimizing.sort_by_key(|&x| field_align(&fields[x as usize]));
                    }
                }
            }

            // inverse_memory_index holds field indices by increasing memory offset.
            // That is, if field 5 has offset 0, the first element of inverse_memory_index is 5.
            // We now write field offsets to the corresponding offset slot;
            // field 5 with offset 0 puts 0 in offsets[5].
            // At the bottom of this function, we use inverse_memory_index to produce memory_index.
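            // Illustrative example (added; not in the original): with three
            // fields laid out as field 2 at offset 0, field 0 at offset 4 and
            // field 1 at offset 8, inverse_memory_index is [2, 0, 1] and the
            // offsets vec ends up as [4, 8, 0].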
            let mut offset = Size::ZERO;

            if let StructKind::Prefixed(prefix_size, prefix_align) = kind {
                let prefix_align = if packed {
                    prefix_align.min(pack)
                } else {
                    prefix_align
                };
                align = align.max(AbiAndPrefAlign::new(prefix_align));
                offset = prefix_size.align_to(prefix_align);
            }

            for &i in &inverse_memory_index {
                let field = fields[i as usize];
                if !sized {
                    bug!("univariant: field #{} of `{}` comes after unsized field",
                         offsets.len(), ty);
                }

                if field.is_unsized() {
                    sized = false;
                }

                // Invariant: offset < dl.obj_size_bound() <= 1<<61
                let field_align = if packed {
                    field.align.min(AbiAndPrefAlign::new(pack))
                } else {
                    field.align
                };
                offset = offset.align_to(field_align.abi);
                align = align.max(field_align);

                debug!("univariant offset: {:?} field: {:#?}", offset, field);
                offsets[i as usize] = offset;

                offset = offset.checked_add(field.size, dl)
                    .ok_or(LayoutError::SizeOverflow(ty))?;
            }

            if repr.align > 0 {
                let repr_align = repr.align as u64;
                align = align.max(AbiAndPrefAlign::new(Align::from_bytes(repr_align).unwrap()));
                debug!("univariant repr_align: {:?}", repr_align);
            }

            debug!("univariant min_size: {:?}", offset);
            let min_size = offset;

            // As stated above, inverse_memory_index holds field indices by increasing offset.
            // This makes it an already-sorted view of the offsets vec.
            // To invert it, consider:
            // if field 5 has offset 0, then inverse_memory_index[0] is 5, and
            // memory_index[5] should be 0.
            // Note: if we didn't optimize, it's already right.
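            // Continuing the example above (added; illustrative): inverting
            // inverse_memory_index = [2, 0, 1] yields memory_index = [1, 2, 0],
            // i.e. field 0 is second in memory, field 1 third, field 2 first.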
            let mut memory_index;
            if optimize {
                memory_index = vec![0; inverse_memory_index.len()];
                for i in 0..inverse_memory_index.len() {
                    memory_index[inverse_memory_index[i] as usize] = i as u32;
                }
            } else {
                memory_index = inverse_memory_index;
            }

            let size = min_size.align_to(align.abi);
            let mut abi = Abi::Aggregate { sized };

            // Unpack newtype ABIs and find scalar pairs.
            if sized && size.bytes() > 0 {
                // All other fields must be ZSTs, and we need them to all start at 0.
                let mut zst_offsets =
                    offsets.iter().enumerate().filter(|&(i, _)| fields[i].is_zst());
                if zst_offsets.all(|(_, o)| o.bytes() == 0) {
                    let mut non_zst_fields =
                        fields.iter().enumerate().filter(|&(_, f)| !f.is_zst());

                    match (non_zst_fields.next(), non_zst_fields.next(), non_zst_fields.next()) {
                        // We have exactly one non-ZST field.
                        (Some((i, field)), None, None) => {
                            // Field fills the struct and it has a scalar or scalar pair ABI.
                            if offsets[i].bytes() == 0 &&
                               align.abi == field.align.abi &&
                               size == field.size {
                                match field.abi {
                                    // For plain scalars, or vectors of them, we can't unpack
                                    // newtypes for `#[repr(C)]`, as that affects C ABIs.
                                    Abi::Scalar(_) | Abi::Vector { .. } if optimize => {
                                        abi = field.abi.clone();
                                    }
                                    // But scalar pairs are Rust-specific and get
                                    // treated as aggregates by C ABIs anyway.
                                    Abi::ScalarPair(..) => {
                                        abi = field.abi.clone();
                                    }
                                    _ => {}
                                }
                            }
                        }

                        // Two non-ZST fields, and they're both scalars.
                        (Some((i, &TyLayout {
                            details: &LayoutDetails { abi: Abi::Scalar(ref a), .. }, ..
                        })), Some((j, &TyLayout {
                            details: &LayoutDetails { abi: Abi::Scalar(ref b), .. }, ..
                        })), None) => {
                            // Order by the memory placement, not source order.
                            let ((i, a), (j, b)) = if offsets[i] < offsets[j] {
                                ((i, a), (j, b))
                            } else {
                                ((j, b), (i, a))
                            };
                            let pair = scalar_pair(a.clone(), b.clone());
                            let pair_offsets = match pair.fields {
                                FieldPlacement::Arbitrary {
                                    ref offsets,
                                    ref memory_index
                                } => {
                                    assert_eq!(memory_index, &[0, 1]);
                                    offsets
                                }
                                _ => bug!()
                            };
                            if offsets[i] == pair_offsets[0] &&
                               offsets[j] == pair_offsets[1] &&
                               align == pair.align &&
                               size == pair.size {
                                // We can use `ScalarPair` only when it matches our
                                // already computed layout (including `#[repr(C)]`).
                                abi = pair.abi.clone();
                            }
                        }

                        _ => {}
                    }
                }
            }

            if sized && fields.iter().any(|f| f.abi.is_uninhabited()) {
                abi = Abi::Uninhabited;
            }

            Ok(LayoutDetails {
                variants: Variants::Single { index: VariantIdx::new(0) },
                fields: FieldPlacement::Arbitrary {
                    offsets,
                    memory_index
                },
                abi,
                align,
                size
            })
        };
        let univariant = |fields: &[TyLayout<'_>], repr: &ReprOptions, kind| {
            Ok(tcx.intern_layout(univariant_uninterned(fields, repr, kind)?))
        };
        debug_assert!(!ty.has_infer_types());
        Ok(match ty.sty {
            // Basic scalars.
            ty::Bool => tcx.intern_layout(LayoutDetails::scalar(self, Scalar {
                value: Int(I8, false),
                valid_range: 0..=1
            })),
            ty::Char => tcx.intern_layout(LayoutDetails::scalar(self, Scalar {
                value: Int(I32, false),
                valid_range: 0..=0x10FFFF
            })),
            ty::Int(ity) => scalar(Int(Integer::from_attr(dl, attr::SignedInt(ity)), true)),
            ty::Uint(ity) => scalar(Int(Integer::from_attr(dl, attr::UnsignedInt(ity)), false)),
            ty::Float(fty) => scalar(Float(fty)),
            ty::FnPtr(_) => {
                let mut ptr = scalar_unit(Pointer);
                ptr.valid_range = 1..=*ptr.valid_range.end();
                tcx.intern_layout(LayoutDetails::scalar(self, ptr))
            }
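            // Note (added; not in the original): the non-null valid range on
            // function pointers is what lets e.g. `Option<fn()>` use the null
            // value as its `None` niche and stay pointer-sized.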
            // The never type.
            ty::Never => tcx.intern_layout(LayoutDetails {
                variants: Variants::Single { index: VariantIdx::new(0) },
                fields: FieldPlacement::Union(0),
                abi: Abi::Uninhabited,
                align: dl.i8_align,
                size: Size::ZERO
            }),

            // Potentially-fat pointers.
            ty::Ref(_, pointee, _) |
            ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
                let mut data_ptr = scalar_unit(Pointer);
                if !ty.is_unsafe_ptr() {
                    data_ptr.valid_range = 1..=*data_ptr.valid_range.end();
                }

                let pointee = tcx.normalize_erasing_regions(param_env, pointee);
                if pointee.is_sized(tcx.at(DUMMY_SP), param_env) {
                    return Ok(tcx.intern_layout(LayoutDetails::scalar(self, data_ptr)));
                }

                let unsized_part = tcx.struct_tail(pointee);
                let metadata = match unsized_part.sty {
                    ty::Foreign(..) => {
                        return Ok(tcx.intern_layout(LayoutDetails::scalar(self, data_ptr)));
                    }
                    ty::Slice(_) | ty::Str => {
                        scalar_unit(Int(dl.ptr_sized_integer(), false))
                    }
                    ty::Dynamic(..) => {
                        let mut vtable = scalar_unit(Pointer);
                        vtable.valid_range = 1..=*vtable.valid_range.end();
                        vtable
                    }
                    _ => return Err(LayoutError::Unknown(unsized_part))
                };

                // Effectively a (ptr, meta) tuple.
                tcx.intern_layout(scalar_pair(data_ptr, metadata))
            }
            // Arrays and slices.
            ty::Array(element, mut count) => {
                if count.has_projections() {
                    count = tcx.normalize_erasing_regions(param_env, count);
                    if count.has_projections() {
                        return Err(LayoutError::Unknown(ty));
                    }
                }

                let element = self.layout_of(element)?;
                let count = count.unwrap_usize(tcx);
                let size = element.size.checked_mul(count, dl)
                    .ok_or(LayoutError::SizeOverflow(ty))?;

                let abi = if count != 0 && ty.conservative_is_privately_uninhabited(tcx) {
                    Abi::Uninhabited
                } else {
                    Abi::Aggregate { sized: true }
                };

                tcx.intern_layout(LayoutDetails {
                    variants: Variants::Single { index: VariantIdx::new(0) },
                    fields: FieldPlacement::Array {
                        stride: element.size,
                        count
                    },
                    abi,
                    align: element.align,
                    size
                })
            }
            ty::Slice(element) => {
                let element = self.layout_of(element)?;
                tcx.intern_layout(LayoutDetails {
                    variants: Variants::Single { index: VariantIdx::new(0) },
                    fields: FieldPlacement::Array {
                        stride: element.size,
                        count: 0
                    },
                    abi: Abi::Aggregate { sized: false },
                    align: element.align,
                    size: Size::ZERO
                })
            }
            ty::Str => tcx.intern_layout(LayoutDetails {
                variants: Variants::Single { index: VariantIdx::new(0) },
                fields: FieldPlacement::Array {
                    stride: Size::from_bytes(1),
                    count: 0
                },
                abi: Abi::Aggregate { sized: false },
                align: dl.i8_align,
                size: Size::ZERO
            }),

            // Odd unit types.
            ty::FnDef(..) => {
                univariant(&[], &ReprOptions::default(), StructKind::AlwaysSized)?
            }
            ty::Dynamic(..) | ty::Foreign(..) => {
                let mut unit = univariant_uninterned(&[], &ReprOptions::default(),
                                                     StructKind::AlwaysSized)?;
                match unit.abi {
                    Abi::Aggregate { ref mut sized } => *sized = false,
                    _ => bug!()
                }
                tcx.intern_layout(unit)
            }
            ty::Generator(def_id, ref substs, _) => {
                // FIXME(tmandry): For fields that are repeated in multiple
                // variants in the GeneratorLayout, we need code to ensure that
                // the offsets of these fields never change. Right now this is
                // not an issue since every variant has every field, but once we
                // optimize this we have to be more careful.

                let discr_index = substs.prefix_tys(def_id, tcx).count();
                let prefix_tys = substs.prefix_tys(def_id, tcx)
                    .chain(iter::once(substs.discr_ty(tcx)));
                let prefix = univariant_uninterned(
                    &prefix_tys.map(|ty| self.layout_of(ty)).collect::<Result<Vec<_>, _>>()?,
                    &ReprOptions::default(),
                    StructKind::AlwaysSized)?;

                let mut size = prefix.size;
                let mut align = prefix.align;
                let variants_tys = substs.state_tys(def_id, tcx);
                let variants = variants_tys.enumerate().map(|(i, variant_tys)| {
                    let mut variant = univariant_uninterned(
                        &variant_tys.map(|ty| self.layout_of(ty)).collect::<Result<Vec<_>, _>>()?,
                        &ReprOptions::default(),
                        StructKind::Prefixed(prefix.size, prefix.align.abi))?;

                    variant.variants = Variants::Single { index: VariantIdx::new(i) };

                    size = size.max(variant.size);
                    align = align.max(variant.align);

                    Ok(variant)
                }).collect::<Result<IndexVec<VariantIdx, _>, _>>()?;

                let abi = if prefix.abi.is_uninhabited() ||
                             variants.iter().all(|v| v.abi.is_uninhabited()) {
                    Abi::Uninhabited
                } else {
                    Abi::Aggregate { sized: true }
                };
                let discr = match &self.layout_of(substs.discr_ty(tcx))?.abi {
                    Abi::Scalar(s) => s.clone(),
                    _ => bug!(),
                };

                let layout = tcx.intern_layout(LayoutDetails {
                    variants: Variants::Multiple {
                        discr,
                        discr_kind: DiscriminantKind::Tag,
                        discr_index,
                        variants,
                    },
                    fields: prefix.fields,
                    abi,
                    size,
                    align,
                });
                debug!("generator layout: {:#?}", layout);
                layout
            }
            ty::Closure(def_id, ref substs) => {
                let tys = substs.upvar_tys(def_id, tcx);
                univariant(&tys.map(|ty| self.layout_of(ty)).collect::<Result<Vec<_>, _>>()?,
                           &ReprOptions::default(),
                           StructKind::AlwaysSized)?
            }

            ty::Tuple(tys) => {
                let kind = if tys.len() == 0 {
                    StructKind::AlwaysSized
                } else {
                    StructKind::MaybeUnsized
                };

                univariant(&tys.iter().map(|k| {
                    self.layout_of(k.expect_ty())
                }).collect::<Result<Vec<_>, _>>()?, &ReprOptions::default(), kind)?
            }
            // SIMD vector types.
            ty::Adt(def, ..) if def.repr.simd() => {
                let element = self.layout_of(ty.simd_type(tcx))?;
                let count = ty.simd_size(tcx) as u64;
                assert!(count > 0);
                let scalar = match element.abi {
                    Abi::Scalar(ref scalar) => scalar.clone(),
                    _ => {
                        tcx.sess.fatal(&format!("monomorphising SIMD type `{}` with \
                                                 a non-machine element type `{}`",
                                                ty, element.ty));
                    }
                };
                let size = element.size.checked_mul(count, dl)
                    .ok_or(LayoutError::SizeOverflow(ty))?;
                let align = dl.vector_align(size);
                let size = size.align_to(align.abi);

                tcx.intern_layout(LayoutDetails {
                    variants: Variants::Single { index: VariantIdx::new(0) },
                    fields: FieldPlacement::Array {
                        stride: element.size,
                        count
                    },
                    abi: Abi::Vector {
                        element: scalar,
                        count
                    },
                    size,
                    align,
                })
            }
            // ADTs.
            ty::Adt(def, substs) => {
                // Cache the field layouts.
                let variants = def.variants.iter().map(|v| {
                    v.fields.iter().map(|field| {
                        self.layout_of(field.ty(tcx, substs))
                    }).collect::<Result<Vec<_>, _>>()
                }).collect::<Result<IndexVec<VariantIdx, _>, _>>()?;

                if def.is_union() {
                    let packed = def.repr.packed();
                    if packed && def.repr.align > 0 {
                        bug!("union cannot be packed and aligned");
                    }

                    let pack = Align::from_bytes(def.repr.pack as u64).unwrap();

                    let mut align = if packed {
                        dl.i8_align
                    } else {
                        dl.aggregate_align
                    };

                    if def.repr.align > 0 {
                        let repr_align = def.repr.align as u64;
                        align = align.max(
                            AbiAndPrefAlign::new(Align::from_bytes(repr_align).unwrap()));
                    }

                    let optimize = !def.repr.inhibit_union_abi_opt();
                    let mut size = Size::ZERO;
                    let mut abi = Abi::Aggregate { sized: true };
                    let index = VariantIdx::new(0);
                    for field in &variants[index] {
                        assert!(!field.is_unsized());

                        let field_align = if packed {
                            field.align.min(AbiAndPrefAlign::new(pack))
                        } else {
                            field.align
                        };
                        align = align.max(field_align);

                        // If all non-ZST fields have the same ABI, forward this ABI.
                        if optimize && !field.is_zst() {
                            // Normalize scalar_unit to the maximal valid range.
                            let field_abi = match &field.abi {
                                Abi::Scalar(x) => Abi::Scalar(scalar_unit(x.value)),
                                Abi::ScalarPair(x, y) => {
                                    Abi::ScalarPair(
                                        scalar_unit(x.value),
                                        scalar_unit(y.value),
                                    )
                                }
                                Abi::Vector { element: x, count } => {
                                    Abi::Vector {
                                        element: scalar_unit(x.value),
                                        count: *count,
                                    }
                                }
                                Abi::Uninhabited |
                                Abi::Aggregate { .. } => Abi::Aggregate { sized: true },
                            };

                            if size == Size::ZERO {
                                // first non-ZST field: initialize 'abi'
                                abi = field_abi;
                            } else if abi != field_abi {
                                // different fields have different ABI: reset to Aggregate
                                abi = Abi::Aggregate { sized: true };
                            }
                        }

                        size = cmp::max(size, field.size);
                    }

                    return Ok(tcx.intern_layout(LayoutDetails {
                        variants: Variants::Single { index },
                        fields: FieldPlacement::Union(variants[index].len()),
                        abi,
                        align,
                        size: size.align_to(align.abi)
                    }));
                }
                // A variant is absent if it's uninhabited and only has ZST fields.
                // Present uninhabited variants only require space for their fields,
                // but *not* an encoding of the discriminant (e.g., a tag value).
                // See issue #49298 for more details on the need to leave space
                // for non-ZST uninhabited data (mostly partial initialization).
                let absent = |fields: &[TyLayout<'_>]| {
                    let uninhabited = fields.iter().any(|f| f.abi.is_uninhabited());
                    let is_zst = fields.iter().all(|f| f.is_zst());
                    uninhabited && is_zst
                };
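                // Illustrative example (added; not in the original): in
                // `enum E { A(u8), B(!, u8) }`, variant B is uninhabited but
                // its `u8` field is not a ZST, so B is *present*: space is
                // still reserved for its fields, even though no tag value
                // needs to encode it.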
                let (present_first, present_second) = {
                    let mut present_variants = variants.iter_enumerated().filter_map(|(i, v)| {
                        if absent(v) { None } else { Some(i) }
                    });
                    (present_variants.next(), present_variants.next())
                };
                if present_first.is_none() {
                    // Uninhabited because it has no variants, or only absent ones.
                    return tcx.layout_raw(param_env.and(tcx.types.never));
                }

                let is_struct = !def.is_enum() ||
                    // Only one variant is present.
                    (present_second.is_none() &&
                     // Representation optimizations are allowed.
                     !def.repr.inhibit_enum_layout_opt());
                if is_struct {
                    // Struct, or univariant enum equivalent to a struct.
                    // (Typechecking will reject discriminant-sizing attrs.)

                    let v = present_first.unwrap();
                    let kind = if def.is_enum() || variants[v].len() == 0 {
                        StructKind::AlwaysSized
                    } else {
                        let param_env = tcx.param_env(def.did);
                        let last_field = def.variants[v].fields.last().unwrap();
                        let always_sized = tcx.type_of(last_field.did)
                            .is_sized(tcx.at(DUMMY_SP), param_env);
                        if !always_sized { StructKind::MaybeUnsized }
                        else { StructKind::AlwaysSized }
                    };

                    let mut st = univariant_uninterned(&variants[v], &def.repr, kind)?;
                    st.variants = Variants::Single { index: v };
                    let (start, end) = self.tcx.layout_scalar_valid_range(def.did);
                    match st.abi {
                        Abi::Scalar(ref mut scalar) |
                        Abi::ScalarPair(ref mut scalar, _) => {
                            // The asserts ensure that we are not using the
                            // `#[rustc_layout_scalar_valid_range(n)]`
                            // attribute to widen the range of anything, as that
                            // would probably result in UB somewhere.
                            if let Bound::Included(start) = start {
                                assert!(*scalar.valid_range.start() <= start);
                                scalar.valid_range = start..=*scalar.valid_range.end();
                            }
                            if let Bound::Included(end) = end {
                                assert!(*scalar.valid_range.end() >= end);
                                scalar.valid_range = *scalar.valid_range.start()..=end;
                            }
                        }
                        _ => assert!(
                            start == Bound::Unbounded && end == Bound::Unbounded,
                            "nonscalar layout for layout_scalar_valid_range type {:?}: {:#?}",
                            ty, st,
                        ),
                    }
                    return Ok(tcx.intern_layout(st));
                }
                // The current code for niche-filling relies on variant indices
                // instead of actual discriminants, so dataful enums with
                // explicit discriminants (RFC #2363) would misbehave.
                let no_explicit_discriminants = def.variants.iter_enumerated()
                    .all(|(i, v)| v.discr == ty::VariantDiscr::Relative(i.as_u32()));

                // Niche-filling enum optimization.
                if !def.repr.inhibit_enum_layout_opt() && no_explicit_discriminants {
                    let mut dataful_variant = None;
                    let mut niche_variants = VariantIdx::MAX..=VariantIdx::new(0);

                    // Find one non-ZST variant.
                    'variants: for (v, fields) in variants.iter_enumerated() {
                        if absent(fields) {
                            continue 'variants;
                        }
                        for f in fields {
                            if !f.is_zst() {
                                if dataful_variant.is_none() {
                                    dataful_variant = Some(v);
                                    continue 'variants;
                                } else {
                                    dataful_variant = None;
                                    break 'variants;
                                }
                            }
                        }
                        niche_variants = *niche_variants.start().min(&v)..=v;
                    }

                    if niche_variants.start() > niche_variants.end() {
                        dataful_variant = None;
                    }

                    if let Some(i) = dataful_variant {
                        let count = (
                            niche_variants.end().as_u32() - niche_variants.start().as_u32() + 1
                        ) as u128;
                        for (field_index, &field) in variants[i].iter().enumerate() {
                            let niche = match self.find_niche(field)? {
                                Some(niche) => niche,
                                None => continue,
                            };
                            let (niche_start, niche_scalar) = match niche.reserve(self, count) {
                                Some(pair) => pair,
                                None => continue,
                            };

                            let mut align = dl.aggregate_align;
                            let st = variants.iter_enumerated().map(|(j, v)| {
                                let mut st = univariant_uninterned(v,
                                    &def.repr, StructKind::AlwaysSized)?;
                                st.variants = Variants::Single { index: j };

                                align = align.max(st.align);

                                Ok(st)
                            }).collect::<Result<IndexVec<VariantIdx, _>, _>>()?;

                            let offset = st[i].fields.offset(field_index) + niche.offset;
                            let size = st[i].size;

                            let mut abi = match st[i].abi {
                                Abi::Scalar(_) => Abi::Scalar(niche_scalar.clone()),
                                Abi::ScalarPair(ref first, ref second) => {
                                    // We need to use scalar_unit to reset the
                                    // valid range to the maximal one for that
                                    // primitive, because only the niche is
                                    // guaranteed to be initialised, not the
                                    // other primitive.
                                    if offset.bytes() == 0 {
                                        Abi::ScalarPair(
                                            niche_scalar.clone(),
                                            scalar_unit(second.value),
                                        )
                                    } else {
                                        Abi::ScalarPair(
                                            scalar_unit(first.value),
                                            niche_scalar.clone(),
                                        )
                                    }
                                }
                                _ => Abi::Aggregate { sized: true },
                            };

                            if st.iter().all(|v| v.abi.is_uninhabited()) {
                                abi = Abi::Uninhabited;
                            }

                            return Ok(tcx.intern_layout(LayoutDetails {
                                variants: Variants::Multiple {
                                    discr: niche_scalar,
                                    discr_kind: DiscriminantKind::Niche {
                                        dataful_variant: i,
                                        niche_variants,
                                        niche_start,
                                    },
                                    discr_index: 0,
                                    variants: st,
                                },
                                fields: FieldPlacement::Arbitrary {
                                    offsets: vec![offset],
                                    memory_index: vec![0]
                                },
                                abi,
                                size,
                                align,
                            }));
                        }
                    }
                }
                let (mut min, mut max) = (i128::max_value(), i128::min_value());
                let discr_type = def.repr.discr_type();
                let bits = Integer::from_attr(self, discr_type).size().bits();
                for (i, discr) in def.discriminants(tcx) {
                    if variants[i].iter().any(|f| f.abi.is_uninhabited()) {
                        continue;
                    }
                    let mut x = discr.val as i128;
                    if discr_type.is_signed() {
                        // sign extend the raw representation to be an i128
                        x = (x << (128 - bits)) >> (128 - bits);
                    }
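                    // E.g. (added; illustrative): with bits = 8, a raw value
                    // of 0xff becomes (0xff << 120) >> 120 == -1 after the
                    // arithmetic shift right.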
                    if x < min { min = x; }
                    if x > max { max = x; }
                }
                // We might have no inhabited variants, so pretend there's at least one.
                if (min, max) == (i128::max_value(), i128::min_value()) {
                    min = 0;
                    max = 0;
                }
                assert!(min <= max, "discriminant range is {}...{}", min, max);
                let (min_ity, signed) = Integer::repr_discr(tcx, ty, &def.repr, min, max);

                let mut align = dl.aggregate_align;
                let mut size = Size::ZERO;

                // We're interested in the smallest alignment, so start large.
                let mut start_align = Align::from_bytes(256).unwrap();
                assert_eq!(Integer::for_align(dl, start_align), None);
                // repr(C) on an enum tells us to make a (tag, union) layout,
                // so we need to grow the prefix alignment to be at least
                // the alignment of the union. (This value is used both for
                // determining the alignment of the overall enum, and for
                // determining the alignment of the payload after the tag.)
                let mut prefix_align = min_ity.align(dl).abi;
                if def.repr.c() {
                    for fields in &variants {
                        for field in fields {
                            prefix_align = prefix_align.max(field.align.abi);
                        }
                    }
                }
                // Create the set of structs that represent each variant.
                let mut layout_variants = variants.iter_enumerated().map(|(i, field_layouts)| {
                    let mut st = univariant_uninterned(&field_layouts,
                        &def.repr, StructKind::Prefixed(min_ity.size(), prefix_align))?;
                    st.variants = Variants::Single { index: i };
                    // Find the first field we can't move later
                    // to make room for a larger discriminant.
                    for field in st.fields.index_by_increasing_offset().map(|j| field_layouts[j]) {
                        if !field.is_zst() || field.align.abi.bytes() != 1 {
                            start_align = start_align.min(field.align.abi);
                            break;
                        }
                    }
                    size = cmp::max(size, st.size);
                    align = align.max(st.align);
                    Ok(st)
                }).collect::<Result<IndexVec<VariantIdx, _>, _>>()?;

                // Align the maximum variant size to the largest alignment.
                size = size.align_to(align.abi);

                if size.bytes() >= dl.obj_size_bound() {
                    return Err(LayoutError::SizeOverflow(ty));
                }

                let typeck_ity = Integer::from_attr(dl, def.repr.discr_type());
                if typeck_ity < min_ity {
                    // It is a bug if layout decided on a greater discriminant size
                    // than typeck did at this point (based on the values the
                    // discriminant can take on), mostly because this discriminant
                    // will be loaded from and stored into a variable of the type
                    // computed by typeck. Consider such a case (a bug): typeck
                    // decided on a byte-sized discriminant, but layout thinks we
                    // need 16 bits to store all discriminant values. Then, in
                    // codegen, storing this 16-bit discriminant into an 8-bit
                    // sized temporary would have to discard some of the space
                    // necessary to represent it (or layout is wrong in thinking
                    // it needs 16 bits).
                    bug!("layout decided on a larger discriminant type ({:?}) than typeck ({:?})",
                         min_ity, typeck_ity);
                    // However, it is fine to make discr type however large (as an
                    // optimisation) after this point – we’ll just truncate the
                    // value we load in codegen.
                }
                // Check to see if we should use a different type for the
                // discriminant. We can safely use a type with the same size
                // as the alignment of the first field of each variant.
                // We increase the size of the discriminant to avoid LLVM copying
                // padding when it doesn't need to. This normally causes unaligned
                // load/stores and excessive memcpy/memset operations. By using a
                // bigger integer size, LLVM can be sure about its contents and
                // won't be so conservative.

                // Use the initial field alignment
                let mut ity = if def.repr.c() || def.repr.int.is_some() {
                    min_ity
                } else {
                    Integer::for_align(dl, start_align).unwrap_or(min_ity)
                };

                // If the alignment is not larger than the chosen discriminant size,
                // don't use the alignment as the final size.
                if ity <= min_ity {
                    ity = min_ity;
                } else {
                    // Patch up the variants' first few fields.
                    let old_ity_size = min_ity.size();
                    let new_ity_size = ity.size();
                    for variant in &mut layout_variants {
                        match variant.fields {
                            FieldPlacement::Arbitrary { ref mut offsets, .. } => {
                                for i in offsets {
                                    if *i <= old_ity_size {
                                        assert_eq!(*i, old_ity_size);
                                        *i = new_ity_size;
                                    }
                                }
                                // We might be making the struct larger.
                                if variant.size <= old_ity_size {
                                    variant.size = new_ity_size;
                                }
                            }
                            _ => bug!()
                        }
                    }
                }

                let tag_mask = !0u128 >> (128 - ity.size().bits());
                let tag = Scalar {
                    value: Int(ity, signed),
                    valid_range: (min as u128 & tag_mask)..=(max as u128 & tag_mask),
                };
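                // Illustrative example (added; not in the original): for
                // ity = I8, min = -1 and max = 2, tag_mask is 0xff and the
                // tag's valid_range is the wrapping range 0xff..=0x02,
                // encoding the discriminants -1, 0, 1 and 2.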
                let mut abi = Abi::Aggregate { sized: true };
                if tag.value.size(dl) == size {
                    abi = Abi::Scalar(tag.clone());
                } else {
                    // Try to use a ScalarPair for all tagged enums.
                    let mut common_prim = None;
                    for (field_layouts, layout_variant) in variants.iter().zip(&layout_variants) {
                        let offsets = match layout_variant.fields {
                            FieldPlacement::Arbitrary { ref offsets, .. } => offsets,
                            _ => bug!(),
                        };
                        let mut fields = field_layouts
                            .iter().zip(offsets)
                            .filter(|p| !p.0.is_zst());
                        let (field, offset) = match (fields.next(), fields.next()) {
                            (None, None) => continue,
                            (Some(pair), None) => pair,
                            _ => {
                                common_prim = None;
                                break;
                            }
                        };
                        let prim = match field.details.abi {
                            Abi::Scalar(ref scalar) => scalar.value,
                            _ => {
                                common_prim = None;
                                break;
                            }
                        };
                        if let Some(pair) = common_prim {
                            // This is pretty conservative. We could go fancier
                            // by conflating things like i32 and u32, or even
                            // realising that (u8, u8) could just cohabit with
                            // u16 or even u32.
                            if pair != (prim, offset) {
                                common_prim = None;
                                break;
                            }
                        } else {
                            common_prim = Some((prim, offset));
                        }
                    }
                    if let Some((prim, offset)) = common_prim {
                        let pair = scalar_pair(tag.clone(), scalar_unit(prim));
                        let pair_offsets = match pair.fields {
                            FieldPlacement::Arbitrary {
                                ref offsets,
                                ref memory_index
                            } => {
                                assert_eq!(memory_index, &[0, 1]);
                                offsets
                            }
                            _ => bug!()
                        };
                        if pair_offsets[0] == Size::ZERO &&
                           pair_offsets[1] == *offset &&
                           align == pair.align &&
                           size == pair.size {
                            // We can use `ScalarPair` only when it matches our
                            // already computed layout (including `#[repr(C)]`).
                            abi = pair.abi.clone();
                        }
                    }
                }
                if layout_variants.iter().all(|v| v.abi.is_uninhabited()) {
                    abi = Abi::Uninhabited;
                }

                tcx.intern_layout(LayoutDetails {
                    variants: Variants::Multiple {
                        discr: tag,
                        discr_kind: DiscriminantKind::Tag,
                        discr_index: 0,
                        variants: layout_variants,
                    },
                    fields: FieldPlacement::Arbitrary {
                        offsets: vec![Size::ZERO],
                        memory_index: vec![0]
                    },
                    abi,
                    align,
                    size
                })
            }

            // Types with no meaningful known layout.
            ty::Projection(_) | ty::Opaque(..) => {
                let normalized = tcx.normalize_erasing_regions(param_env, ty);
                if ty == normalized {
                    return Err(LayoutError::Unknown(ty));
                }
                tcx.layout_raw(param_env.and(normalized))?
            }

            ty::Bound(..) |
            ty::Placeholder(..) |
            ty::UnnormalizedProjection(..) |
            ty::GeneratorWitness(..) |
            ty::Infer(_) => {
                bug!("LayoutDetails::compute: unexpected type `{}`", ty)
            }

            ty::Param(_) | ty::Error => {
                return Err(LayoutError::Unknown(ty));
            }
        })
    }
    /// This is invoked by the `layout_raw` query to record the final
    /// layout of each type.
    #[inline]
    fn record_layout_for_printing(&self, layout: TyLayout<'tcx>) {
        // If we are running with `-Zprint-type-sizes`, maybe record layouts
        // for dumping later.
        if self.tcx.sess.opts.debugging_opts.print_type_sizes {
            self.record_layout_for_printing_outlined(layout)
        }
    }

    fn record_layout_for_printing_outlined(&self, layout: TyLayout<'tcx>) {
        // Ignore layouts that are done with non-empty environments or
        // non-monomorphic layouts, as the user only wants to see the stuff
        // resulting from the final codegen session.
        if layout.ty.has_param_types() ||
           layout.ty.has_self_ty() ||
           !self.param_env.caller_bounds.is_empty()
        {
            return;
        }

        // (delay format until we actually need it)
        let record = |kind, packed, opt_discr_size, variants| {
            let type_desc = format!("{:?}", layout.ty);
            self.tcx.sess.code_stats.borrow_mut().record_type_size(kind,
                                                                   type_desc,
                                                                   layout.align.abi,
                                                                   layout.size,
                                                                   packed,
                                                                   opt_discr_size,
                                                                   variants);
        };
        let adt_def = match layout.ty.sty {
            ty::Adt(ref adt_def, _) => {
                debug!("print-type-size t: `{:?}` process adt", layout.ty);
                adt_def
            }

            ty::Closure(..) => {
                debug!("print-type-size t: `{:?}` record closure", layout.ty);
                record(DataTypeKind::Closure, false, None, vec![]);
                return;
            }

            _ => {
                debug!("print-type-size t: `{:?}` skip non-nominal", layout.ty);
                return;
            }
        };

        let adt_kind = adt_def.adt_kind();
        let adt_packed = adt_def.repr.packed();

        let build_variant_info = |n: Option<Ident>,
                                  flds: &[ast::Name],
                                  layout: TyLayout<'tcx>| {
            let mut min_size = Size::ZERO;
            let field_info: Vec<_> = flds.iter().enumerate().map(|(i, &name)| {
                match layout.field(self, i) {
                    Err(err) => {
                        bug!("no layout found for field {}: `{:?}`", name, err);
                    }
                    Ok(field_layout) => {
                        let offset = layout.fields.offset(i);
                        let field_end = offset + field_layout.size;
                        if min_size < field_end {
                            min_size = field_end;
                        }
                        session::FieldInfo {
                            name: name.to_string(),
                            offset: offset.bytes(),
                            size: field_layout.size.bytes(),
                            align: field_layout.align.abi.bytes(),
                        }
                    }
                }
            }).collect();

            session::VariantInfo {
                name: n.map(|n| n.to_string()),
                kind: if layout.is_unsized() {
                    session::SizeKind::Min
                } else {
                    session::SizeKind::Exact
                },
                align: layout.align.abi.bytes(),
                size: if min_size.bytes() == 0 {
                    layout.size.bytes()
                } else {
                    min_size.bytes()
                },
                fields: field_info,
            }
        };

        match layout.variants {
            Variants::Single { index } => {
                debug!("print-type-size `{:#?}` variant {}",
                       layout, adt_def.variants[index].ident);
                if !adt_def.variants.is_empty() {
                    let variant_def = &adt_def.variants[index];
                    let fields: Vec<_> =
                        variant_def.fields.iter().map(|f| f.ident.name).collect();
                    record(adt_kind.into(),
                           adt_packed,
                           None,
                           vec![build_variant_info(Some(variant_def.ident),
                                                   &fields,
                                                   layout)]);
                } else {
                    // (This case arises for *empty* enums; so give it
                    // zero variants.)
                    record(adt_kind.into(), adt_packed, None, vec![]);
                }
            }

            Variants::Multiple { ref discr, ref discr_kind, .. } => {
                debug!("print-type-size `{:#?}` adt general variants def {}",
                       layout.ty, adt_def.variants.len());
                let variant_infos: Vec<_> =
                    adt_def.variants.iter_enumerated().map(|(i, variant_def)| {
                        let fields: Vec<_> =
                            variant_def.fields.iter().map(|f| f.ident.name).collect();
                        build_variant_info(Some(variant_def.ident),
                                           &fields,
                                           layout.for_variant(self, i))
                    })
                    .collect();
                record(adt_kind.into(), adt_packed, match discr_kind {
                    DiscriminantKind::Tag => Some(discr.value.size(self)),
                    _ => None
                }, variant_infos);
            }
        }
    }
}

/// Type size "skeleton", i.e., the only information determining a type's size.
/// While this is conservative (aside from constant sizes, only pointers,
/// newtypes thereof, and null-pointer-optimized enums are allowed), it is
/// enough to statically check common use cases of transmute.
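/// For example (added; illustrative): a `transmute` between `&'a T` and
/// `&'b T`, where `T: ?Sized` is a type parameter, can still be checked,
/// because both sides reduce to `SizeSkeleton::Pointer` with the same `tail`.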
#[derive(Copy, Clone, Debug)]
pub enum SizeSkeleton<'tcx> {
    /// Any statically computable Layout.
    Known(Size),

    /// A potentially-fat pointer.
    Pointer {
        /// If true, this pointer is never null.
        non_zero: bool,
        /// The type which determines the unsized metadata, if any,
        /// of this pointer. Either a type parameter or a projection
        /// depending on one, with regions erased.
        tail: Ty<'tcx>,
    },
}
impl<'a, 'tcx> SizeSkeleton<'tcx> {
    pub fn compute(ty: Ty<'tcx>,
                   tcx: TyCtxt<'a, 'tcx, 'tcx>,
                   param_env: ty::ParamEnv<'tcx>)
                   -> Result<SizeSkeleton<'tcx>, LayoutError<'tcx>> {
        debug_assert!(!ty.has_infer_types());

        // First try computing a static layout.
        let err = match tcx.layout_of(param_env.and(ty)) {
            Ok(layout) => {
                return Ok(SizeSkeleton::Known(layout.size));
            }
            Err(err) => err
        };

        match ty.sty {
            ty::Ref(_, pointee, _) |
            ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
                let non_zero = !ty.is_unsafe_ptr();
                let tail = tcx.struct_tail(pointee);
                match tail.sty {
                    ty::Param(_) | ty::Projection(_) => {
                        debug_assert!(tail.has_param_types() || tail.has_self_ty());
                        Ok(SizeSkeleton::Pointer {
                            non_zero,
                            tail: tcx.erase_regions(&tail)
                        })
                    }
                    _ => {
                        bug!("SizeSkeleton::compute({}): layout errored ({}), yet \
                              tail `{}` is not a type parameter or a projection",
                             ty, err, tail)
                    }
                }
            }
            ty::Adt(def, substs) => {
                // Only newtypes and enums w/ nullable pointer optimization.
                if def.is_union() || def.variants.is_empty() || def.variants.len() > 2 {
                    return Err(err);
                }

                // Get a zero-sized variant or a pointer newtype.
                let zero_or_ptr_variant = |i| {
                    let i = VariantIdx::new(i);
                    let fields = def.variants[i].fields.iter().map(|field| {
                        SizeSkeleton::compute(field.ty(tcx, substs), tcx, param_env)
                    });
                    let mut ptr = None;
                    for field in fields {
                        let field = field?;
                        match field {
                            SizeSkeleton::Known(size) => {
                                if size.bytes() > 0 { return Err(err); }
                            }
                            SizeSkeleton::Pointer {..} => {
                                if ptr.is_some() { return Err(err); }
                                ptr = Some(field);
                            }
                        }
                    }
                    Ok(ptr)
                };

                let v0 = zero_or_ptr_variant(0)?;
                // Newtype.
                if def.variants.len() == 1 {
                    if let Some(SizeSkeleton::Pointer { non_zero, tail }) = v0 {
                        return Ok(SizeSkeleton::Pointer {
                            non_zero: non_zero || match tcx.layout_scalar_valid_range(def.did) {
                                (Bound::Included(start), Bound::Unbounded) => start > 0,
                                (Bound::Included(start), Bound::Included(end)) =>
                                    0 < start && start < end,
                                _ => false,
                            },
                            tail,
                        });
                    } else {
                        return Err(err);
                    }
                }

                let v1 = zero_or_ptr_variant(1)?;
                // Nullable pointer enum optimization.
                match (v0, v1) {
                    (Some(SizeSkeleton::Pointer { non_zero: true, tail }), None) |
                    (None, Some(SizeSkeleton::Pointer { non_zero: true, tail })) => {
                        Ok(SizeSkeleton::Pointer { non_zero: false, tail })
                    }
                    _ => Err(err)
                }
            }
            ty::Projection(_) | ty::Opaque(..) => {
                let normalized = tcx.normalize_erasing_regions(param_env, ty);
                if ty == normalized {
                    Err(err)
                } else {
                    SizeSkeleton::compute(normalized, tcx, param_env)
                }
            }

            _ => Err(err)
        }
    }

    pub fn same_size(self, other: SizeSkeleton<'_>) -> bool {
        match (self, other) {
            (SizeSkeleton::Known(a), SizeSkeleton::Known(b)) => a == b,
            (SizeSkeleton::Pointer { tail: a, .. },
             SizeSkeleton::Pointer { tail: b, .. }) => a == b,
            _ => false
        }
    }
}
pub trait HasTyCtxt<'tcx>: HasDataLayout {
    fn tcx<'a>(&'a self) -> TyCtxt<'a, 'tcx, 'tcx>;
}

pub trait HasParamEnv<'tcx> {
    fn param_env(&self) -> ty::ParamEnv<'tcx>;
}

impl<'a, 'gcx, 'tcx> HasDataLayout for TyCtxt<'a, 'gcx, 'tcx> {
    fn data_layout(&self) -> &TargetDataLayout { &self.data_layout }
}

impl<'a, 'gcx, 'tcx> HasTyCtxt<'gcx> for TyCtxt<'a, 'gcx, 'tcx> {
    fn tcx<'b>(&'b self) -> TyCtxt<'b, 'gcx, 'gcx> { self.global_tcx() }
}

impl<'tcx, C> HasParamEnv<'tcx> for LayoutCx<'tcx, C> {
    fn param_env(&self) -> ty::ParamEnv<'tcx> { self.param_env }
}

impl<'tcx, T: HasDataLayout> HasDataLayout for LayoutCx<'tcx, T> {
    fn data_layout(&self) -> &TargetDataLayout { self.tcx.data_layout() }
}

impl<'gcx, 'tcx, T: HasTyCtxt<'gcx>> HasTyCtxt<'gcx> for LayoutCx<'tcx, T> {
    fn tcx<'b>(&'b self) -> TyCtxt<'b, 'gcx, 'gcx> { self.tcx.tcx() }
}

pub trait MaybeResult<T> {
    type Error;

    fn from(x: Result<T, Self::Error>) -> Self;
    fn to_result(self) -> Result<T, Self::Error>;
}

impl<T> MaybeResult<T> for T {
    type Error = !;

    fn from(x: Result<T, Self::Error>) -> Self {
        let Ok(x) = x;
        x
    }
    fn to_result(self) -> Result<T, Self::Error> { Ok(self) }
}

impl<T, E> MaybeResult<T> for Result<T, E> {
    type Error = E;

    fn from(x: Result<T, Self::Error>) -> Self { x }
    fn to_result(self) -> Result<T, Self::Error> { self }
}

pub type TyLayout<'tcx> = ::rustc_target::abi::TyLayout<'tcx, Ty<'tcx>>;
impl<'a, 'tcx> LayoutOf for LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> {
    type Ty = Ty<'tcx>;
    type TyLayout = Result<TyLayout<'tcx>, LayoutError<'tcx>>;

    /// Computes the layout of a type. Note that this implicitly
    /// executes in "reveal all" mode.
    fn layout_of(&self, ty: Ty<'tcx>) -> Self::TyLayout {
        let param_env = self.param_env.with_reveal_all();
        let ty = self.tcx.normalize_erasing_regions(param_env, ty);
        let details = self.tcx.layout_raw(param_env.and(ty))?;
        let layout = TyLayout {
            ty,
            details
        };

        // N.B., this recording is normally disabled; when enabled, it
        // can however trigger recursive invocations of `layout_of`.
        // Therefore, we execute it *after* the main query has
        // completed, to avoid problems around recursive structures
        // and the like. (Admittedly, I wasn't able to reproduce a problem
        // here, but it seems like the right thing to do. -nmatsakis)
        self.record_layout_for_printing(layout);

        Ok(layout)
    }
}
impl<'a, 'tcx> LayoutOf for LayoutCx<'tcx, ty::query::TyCtxtAt<'a, 'tcx, 'tcx>> {
    type Ty = Ty<'tcx>;
    type TyLayout = Result<TyLayout<'tcx>, LayoutError<'tcx>>;

    /// Computes the layout of a type. Note that this implicitly
    /// executes in "reveal all" mode.
    fn layout_of(&self, ty: Ty<'tcx>) -> Self::TyLayout {
        let param_env = self.param_env.with_reveal_all();
        let ty = self.tcx.normalize_erasing_regions(param_env, ty);
        let details = self.tcx.layout_raw(param_env.and(ty))?;
        let layout = TyLayout {
            ty,
            details
        };

        // N.B., this recording is normally disabled; when enabled, it
        // can however trigger recursive invocations of `layout_of`.
        // Therefore, we execute it *after* the main query has
        // completed, to avoid problems around recursive structures
        // and the like. (Admittedly, I wasn't able to reproduce a problem
        // here, but it seems like the right thing to do. -nmatsakis)
        let cx = LayoutCx {
            tcx: *self.tcx,
            param_env: self.param_env
        };
        cx.record_layout_for_printing(layout);

        Ok(layout)
    }
}
// Helper (inherent) `layout_of` methods to avoid pushing `LayoutCx` to users.
impl TyCtxt<'a, 'tcx, '_> {
    /// Computes the layout of a type. Note that this implicitly
    /// executes in "reveal all" mode.
    #[inline]
    pub fn layout_of(self, param_env_and_ty: ty::ParamEnvAnd<'tcx, Ty<'tcx>>)
                     -> Result<TyLayout<'tcx>, LayoutError<'tcx>> {
        let cx = LayoutCx {
            tcx: self.global_tcx(),
            param_env: param_env_and_ty.param_env
        };
        cx.layout_of(param_env_and_ty.value)
    }
}

impl ty::query::TyCtxtAt<'a, 'tcx, '_> {
    /// Computes the layout of a type. Note that this implicitly
    /// executes in "reveal all" mode.
    #[inline]
    pub fn layout_of(self, param_env_and_ty: ty::ParamEnvAnd<'tcx, Ty<'tcx>>)
                     -> Result<TyLayout<'tcx>, LayoutError<'tcx>> {
        let cx = LayoutCx {
            tcx: self.global_tcx().at(self.span),
            param_env: param_env_and_ty.param_env
        };
        cx.layout_of(param_env_and_ty.value)
    }
}
impl<'a, 'tcx, C> TyLayoutMethods<'tcx, C> for Ty<'tcx>
    where C: LayoutOf<Ty = Ty<'tcx>> + HasTyCtxt<'tcx>,
          C::TyLayout: MaybeResult<TyLayout<'tcx>>,
          C: HasParamEnv<'tcx>
{
    fn for_variant(this: TyLayout<'tcx>, cx: &C, variant_index: VariantIdx) -> TyLayout<'tcx> {
        let details = match this.variants {
            Variants::Single { index } if index == variant_index => this.details,

            Variants::Single { index } => {
                // Deny calling for_variant more than once for non-Single enums.
                if let Ok(layout) = cx.layout_of(this.ty).to_result() {
                    assert_eq!(layout.variants, Variants::Single { index });
                }

                let fields = match this.ty.sty {
                    ty::Adt(def, _) => def.variants[variant_index].fields.len(),
                    _ => bug!()
                };
                let tcx = cx.tcx();
                tcx.intern_layout(LayoutDetails {
                    variants: Variants::Single { index: variant_index },
                    fields: FieldPlacement::Union(fields),
                    abi: Abi::Uninhabited,
                    align: tcx.data_layout.i8_align,
                    size: Size::ZERO
                })
            }

            Variants::Multiple { ref variants, .. } => {
                &variants[variant_index]
            }
        };

        assert_eq!(details.variants, Variants::Single { index: variant_index });

        TyLayout {
            ty: this.ty,
            details
        }
    }
    fn field(this: TyLayout<'tcx>, cx: &C, i: usize) -> C::TyLayout {
        let tcx = cx.tcx();
        let discr_layout = |discr: &Scalar| -> C::TyLayout {
            let layout = LayoutDetails::scalar(cx, discr.clone());
            MaybeResult::from(Ok(TyLayout {
                details: tcx.intern_layout(layout),
                ty: discr.value.to_ty(tcx),
            }))
        };

        cx.layout_of(match this.ty.sty {
            ty::Bool |
            ty::Char |
            ty::Int(_) |
            ty::Uint(_) |
            ty::Float(_) |
            ty::FnPtr(_) |
            ty::Never |
            ty::FnDef(..) |
            ty::GeneratorWitness(..) |
            ty::Foreign(..) |
            ty::Dynamic(..) => {
                bug!("TyLayout::field_type({:?}): not applicable", this)
            }

            // Potentially-fat pointers.
            ty::Ref(_, pointee, _) |
            ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
                assert!(i < this.fields.count());

                // Reuse the fat *T type as its own thin pointer data field.
                // This provides information about e.g., DST struct pointees
                // (which may have no non-DST form), and will work as long
                // as the `Abi` or `FieldPlacement` is checked by users.
                if i == 0 {
                    let nil = tcx.mk_unit();
                    let ptr_ty = if this.ty.is_unsafe_ptr() {
                        tcx.mk_mut_ptr(nil)
                    } else {
                        tcx.mk_mut_ref(tcx.lifetimes.re_static, nil)
                    };
                    return MaybeResult::from(cx.layout_of(ptr_ty).to_result()
                        .map(|mut ptr_layout| {
                            ptr_layout.ty = this.ty;
                            ptr_layout
                        }));
                }

                match tcx.struct_tail(pointee).sty {
                    ty::Slice(element) => element,
                    ty::Str => tcx.types.usize,
                    ty::Dynamic(_, _) => {
                        tcx.mk_imm_ref(
                            tcx.lifetimes.re_static,
                            tcx.mk_array(tcx.types.usize, 3),
                        )
                        /* FIXME: use actual fn pointers
                        Warning: naively computing the number of entries in the
                        vtable by counting the methods on the trait + methods on
                        all parent traits does not work, because some methods can
                        be not object safe and thus excluded from the vtable.
                        Increase this counter if you tried to implement this but
                        failed to do it without duplicating a lot of code from
                        other places in the compiler: 2
                        tcx.mk_tup(&[
                            tcx.mk_array(tcx.types.usize, 3),
                            tcx.mk_array(Option<fn()>),
                        ])
                        */
                    }
                    _ => bug!("TyLayout::field_type({:?}): not applicable", this)
                }
            }
            // Arrays and slices.
            ty::Array(element, _) |
            ty::Slice(element) => element,
            ty::Str => tcx.types.u8,

            // Tuples, generators and closures.
            ty::Closure(def_id, ref substs) => {
                substs.upvar_tys(def_id, tcx).nth(i).unwrap()
            }

            ty::Generator(def_id, ref substs, _) => {
                match this.variants {
                    Variants::Single { index } => {
                        substs.state_tys(def_id, tcx)
                            .nth(index.as_usize()).unwrap()
                            .nth(i).unwrap()
                    }
                    Variants::Multiple { ref discr, discr_index, .. } => {
                        if i == discr_index {
                            return discr_layout(discr);
                        }
                        substs.prefix_tys(def_id, tcx).nth(i).unwrap()
                    }
                }
            }

            ty::Tuple(tys) => tys[i].expect_ty(),

            // SIMD vector types.
            ty::Adt(def, ..) if def.repr.simd() => {
                this.ty.simd_type(tcx)
            }

            // ADT types.
            ty::Adt(def, substs) => {
                match this.variants {
                    Variants::Single { index } => {
                        def.variants[index].fields[i].ty(tcx, substs)
                    }

                    // Discriminant field for enums (where applicable).
                    Variants::Multiple { ref discr, .. } => {
                        assert_eq!(i, 0);
                        return discr_layout(discr);
                    }
                }
            }

            ty::Projection(_) | ty::UnnormalizedProjection(..) | ty::Bound(..) |
            ty::Placeholder(..) | ty::Opaque(..) | ty::Param(_) | ty::Infer(_) |
            ty::Error => {
                bug!("TyLayout::field_type: unexpected type `{}`", this.ty)
            }
        })
    }
    fn pointee_info_at(
        this: TyLayout<'tcx>,
        cx: &C,
        offset: Size,
    ) -> Option<PointeeInfo> {
        match this.ty.sty {
            ty::RawPtr(mt) if offset.bytes() == 0 => {
                cx.layout_of(mt.ty).to_result().ok()
                    .map(|layout| PointeeInfo {
                        size: layout.size,
                        align: layout.align.abi,
                        safe: None,
                    })
            }

            ty::Ref(_, ty, mt) if offset.bytes() == 0 => {
                let tcx = cx.tcx();
                let is_freeze = ty.is_freeze(tcx, cx.param_env(), DUMMY_SP);
                let kind = match mt {
                    hir::MutImmutable => if is_freeze {
                        PointerKind::Frozen
                    } else {
                        PointerKind::Shared
                    },
                    hir::MutMutable => {
                        // Previously we would only emit noalias annotations for LLVM >= 6 or in
                        // panic=abort mode. That was deemed right, as prior versions had many bugs
                        // in conjunction with unwinding, but later versions didn’t seem to have
                        // said issues. See issue #31681.
                        //
                        // Alas, later on we encountered a case where noalias would generate wrong
                        // code altogether even with recent versions of LLVM in *safe* code with no
                        // unwinding involved. See #54462.
                        //
                        // For now, do not enable mutable_noalias by default at all, while the
                        // issue is being figured out.
                        let mutable_noalias = tcx.sess.opts.debugging_opts.mutable_noalias
                            .unwrap_or(false);
                        if mutable_noalias {
                            PointerKind::UniqueBorrowed
                        } else {
                            PointerKind::Shared
                        }
                    }
                };

                cx.layout_of(ty).to_result().ok()
                    .map(|layout| PointeeInfo {
                        size: layout.size,
                        align: layout.align.abi,
                        safe: Some(kind),
                    })
            }

            _ => {
                let mut data_variant = match this.variants {
                    // Within the discriminant field, only the niche itself is
                    // always initialized, so we only check for a pointer at its
                    // offset.
                    //
                    // If the niche is a pointer, it's either valid (according
                    // to its type), or null (which the niche field's scalar
                    // validity range encodes). This allows using
                    // `dereferenceable_or_null` for e.g., `Option<&T>`, and
                    // this will continue to work as long as we don't start
                    // using more niches than just null (e.g., the first page of
                    // the address space, or unaligned pointers).
                    Variants::Multiple {
                        discr_kind: DiscriminantKind::Niche {
                            dataful_variant,
                            ..
                        },
                        discr_index,
                        ..
                    } if this.fields.offset(discr_index) == offset =>
                        Some(this.for_variant(cx, dataful_variant)),

                    _ => Some(this),
                };

                if let Some(variant) = data_variant {
                    // We're not interested in any unions.
                    if let FieldPlacement::Union(_) = variant.fields {
                        data_variant = None;
                    }
                }

                let mut result = None;

                if let Some(variant) = data_variant {
                    let ptr_end = offset + Pointer.size(cx);
                    for i in 0..variant.fields.count() {
                        let field_start = variant.fields.offset(i);
                        if field_start <= offset {
                            let field = variant.field(cx, i);
                            result = field.to_result().ok()
                                .and_then(|field| {
                                    if ptr_end <= field_start + field.size {
                                        // We found the right field, look inside it.
                                        field.pointee_info_at(cx, offset - field_start)
                                    } else {
                                        None
                                    }
                                });
                            if result.is_some() {
                                break;
                            }
                        }
                    }
                }

                // FIXME(eddyb) This should be for `ptr::Unique<T>`, not `Box<T>`.
                if let Some(ref mut pointee) = result {
                    if let ty::Adt(def, _) = this.ty.sty {
                        if def.is_box() && offset.bytes() == 0 {
                            pointee.safe = Some(PointerKind::UniqueOwned);
                        }
                    }
                }

                result
            }
        }
    }
}
pub struct Niche {
    pub offset: Size,
    pub scalar: Scalar,
    pub available: u128,
}

impl Niche {
    fn reserve<'a, 'tcx>(
        &self,
        cx: &LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>>,
        count: u128,
    ) -> Option<(u128, Scalar)> {
        if count > self.available {
            return None;
        }
        let Scalar { value, valid_range: ref v } = self.scalar;
        let bits = value.size(cx).bits();
        assert!(bits <= 128);
        let max_value = !0u128 >> (128 - bits);
        let start = v.end().wrapping_add(1) & max_value;
        let end = v.end().wrapping_add(count) & max_value;
        Some((start, Scalar { value, valid_range: *v.start()..=end }))
    }
}
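// Worked example (added; illustrative only): reserving one value in a
// `bool` scalar (I8, valid_range 0..=1) yields start = 2 and a widened
// valid_range of 0..=2; this is how `Option<bool>` can encode `None` as 2
// and stay one byte.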
impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> {
    /// Find the offset of a niche leaf field, starting from
    /// the given type and recursing through aggregates.
    // FIXME(eddyb) traverse already optimized enums.
    fn find_niche(&self, layout: TyLayout<'tcx>) -> Result<Option<Niche>, LayoutError<'tcx>> {
        let scalar_niche = |scalar: &Scalar, offset| {
            let Scalar { value, valid_range: ref v } = *scalar;

            let bits = value.size(self).bits();
            assert!(bits <= 128);
            let max_value = !0u128 >> (128 - bits);

            // Find out how many values are outside the valid range.
            let available = if v.start() <= v.end() {
                v.start() + (max_value - v.end())
            } else {
                v.start() - v.end() - 1
            };

            // Give up if there is no niche value available.
            if available == 0 {
                return None;
            }

            Some(Niche { offset, scalar: scalar.clone(), available })
        };
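        // E.g. (added; illustrative): a non-null pointer scalar with
        // valid_range 1..=max_value leaves exactly one niche value (null),
        // while a `char` (0..=0x10FFFF in 32 bits) leaves
        // 0xffff_ffff - 0x10ffff = 0xffef_0000 values available.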
        // Local variables which live across yields are stored
        // in the generator type as fields. These may be uninitialized,
        // so we don't look for niches there.
        if let ty::Generator(..) = layout.ty.sty {
            return Ok(None);
        }

        match layout.abi {
            Abi::Scalar(ref scalar) => {
                return Ok(scalar_niche(scalar, Size::ZERO));
            }
            Abi::ScalarPair(ref a, ref b) => {
                // HACK(nox): We iter on `b` and then `a` because `max_by_key`
                // returns the last maximum.
                let niche = iter::once(
                    (b, a.value.size(self).align_to(b.value.align(self).abi))
                )
                    .chain(iter::once((a, Size::ZERO)))
                    .filter_map(|(scalar, offset)| scalar_niche(scalar, offset))
                    .max_by_key(|niche| niche.available);
                return Ok(niche);
            }
            Abi::Vector { ref element, .. } => {
                return Ok(scalar_niche(element, Size::ZERO));
            }
            _ => {}
        }

        // Perhaps one of the fields is non-zero; let's recurse and find out.
        if let FieldPlacement::Union(_) = layout.fields {
            // Only Rust enums have safe-to-inspect fields
            // (a discriminant), other unions are unsafe.
            if let Variants::Single { .. } = layout.variants {
                return Ok(None);
            }
        }
        if let FieldPlacement::Array { count: original_64_bit_count, .. } = layout.fields {
            // rust-lang/rust#57038: avoid ICE within FieldPlacement::count when count too big
            if original_64_bit_count > usize::max_value() as u64 {
                return Err(LayoutError::SizeOverflow(layout.ty));
            }
            if layout.fields.count() > 0 {
                return self.find_niche(layout.field(self, 0)?);
            } else {
                return Ok(None);
            }
        }

        let mut niche = None;
        let mut available = 0;
        for i in 0..layout.fields.count() {
            if let Some(mut c) = self.find_niche(layout.field(self, i)?)? {
                if c.available > available {
                    available = c.available;
                    c.offset += layout.fields.offset(i);
                    niche = Some(c);
                }
            }
        }
        Ok(niche)
    }
}
impl<'a> HashStable<StableHashingContext<'a>> for Variants {
    fn hash_stable<W: StableHasherResult>(&self,
                                          hcx: &mut StableHashingContext<'a>,
                                          hasher: &mut StableHasher<W>) {
        use crate::ty::layout::Variants::*;
        mem::discriminant(self).hash_stable(hcx, hasher);

        match *self {
            Single { index } => {
                index.hash_stable(hcx, hasher);
            }
            Multiple { ref discr, ref discr_kind, discr_index, ref variants } => {
                discr.hash_stable(hcx, hasher);
                discr_kind.hash_stable(hcx, hasher);
                discr_index.hash_stable(hcx, hasher);
                variants.hash_stable(hcx, hasher);
            }
        }
    }
}
impl<'a> HashStable<StableHashingContext<'a>> for DiscriminantKind {
    fn hash_stable<W: StableHasherResult>(&self,
                                          hcx: &mut StableHashingContext<'a>,
                                          hasher: &mut StableHasher<W>) {
        use crate::ty::layout::DiscriminantKind::*;
        mem::discriminant(self).hash_stable(hcx, hasher);

        match *self {
            Tag => {}
            Niche { dataful_variant, ref niche_variants, niche_start } => {
                dataful_variant.hash_stable(hcx, hasher);
                niche_variants.start().hash_stable(hcx, hasher);
                niche_variants.end().hash_stable(hcx, hasher);
                niche_start.hash_stable(hcx, hasher);
            }
        }
    }
}
impl<'a> HashStable<StableHashingContext<'a>> for FieldPlacement {
    fn hash_stable<W: StableHasherResult>(&self,
                                          hcx: &mut StableHashingContext<'a>,
                                          hasher: &mut StableHasher<W>) {
        use crate::ty::layout::FieldPlacement::*;
        mem::discriminant(self).hash_stable(hcx, hasher);

        match *self {
            Union(count) => {
                count.hash_stable(hcx, hasher);
            }
            Array { count, stride } => {
                count.hash_stable(hcx, hasher);
                stride.hash_stable(hcx, hasher);
            }
            Arbitrary { ref offsets, ref memory_index } => {
                offsets.hash_stable(hcx, hasher);
                memory_index.hash_stable(hcx, hasher);
            }
        }
    }
}
impl<'a> HashStable<StableHashingContext<'a>> for VariantIdx {
    fn hash_stable<W: StableHasherResult>(
        &self,
        hcx: &mut StableHashingContext<'a>,
        hasher: &mut StableHasher<W>,
    ) {
        self.as_u32().hash_stable(hcx, hasher)
    }
}

impl<'a> HashStable<StableHashingContext<'a>> for Abi {
    fn hash_stable<W: StableHasherResult>(&self,
                                          hcx: &mut StableHashingContext<'a>,
                                          hasher: &mut StableHasher<W>) {
        use crate::ty::layout::Abi::*;
        mem::discriminant(self).hash_stable(hcx, hasher);

        match *self {
            Uninhabited => {}
            Scalar(ref value) => {
                value.hash_stable(hcx, hasher);
            }
            ScalarPair(ref a, ref b) => {
                a.hash_stable(hcx, hasher);
                b.hash_stable(hcx, hasher);
            }
            Vector { ref element, count } => {
                element.hash_stable(hcx, hasher);
                count.hash_stable(hcx, hasher);
            }
            Aggregate { sized } => {
                sized.hash_stable(hcx, hasher);
            }
        }
    }
}
impl<'a> HashStable<StableHashingContext<'a>> for Scalar {
    fn hash_stable<W: StableHasherResult>(&self,
                                          hcx: &mut StableHashingContext<'a>,
                                          hasher: &mut StableHasher<W>) {
        let Scalar { value, ref valid_range } = *self;
        value.hash_stable(hcx, hasher);
        valid_range.start().hash_stable(hcx, hasher);
        valid_range.end().hash_stable(hcx, hasher);
    }
}
impl_stable_hash_for!(struct crate::ty::layout::LayoutDetails {
    variants, fields, abi, size, align
});

impl_stable_hash_for!(enum crate::ty::layout::Integer {
    I8, I16, I32, I64, I128
});

impl_stable_hash_for!(enum crate::ty::layout::Primitive {
    Int(integer, signed),
    Float(fty),
    Pointer
});

impl_stable_hash_for!(struct crate::ty::layout::AbiAndPrefAlign {
    abi, pref
});
impl<'gcx> HashStable<StableHashingContext<'gcx>> for Align {
    fn hash_stable<W: StableHasherResult>(&self,
                                          hcx: &mut StableHashingContext<'gcx>,
                                          hasher: &mut StableHasher<W>) {
        self.bytes().hash_stable(hcx, hasher);
    }
}

impl<'gcx> HashStable<StableHashingContext<'gcx>> for Size {
    fn hash_stable<W: StableHasherResult>(&self,
                                          hcx: &mut StableHashingContext<'gcx>,
                                          hasher: &mut StableHasher<W>) {
        self.bytes().hash_stable(hcx, hasher);
    }
}

impl<'a, 'gcx> HashStable<StableHashingContext<'a>> for LayoutError<'gcx>
{
    fn hash_stable<W: StableHasherResult>(&self,
                                          hcx: &mut StableHashingContext<'a>,
                                          hasher: &mut StableHasher<W>) {
        use crate::ty::layout::LayoutError::*;
        mem::discriminant(self).hash_stable(hcx, hasher);

        match *self {
            Unknown(t) |
            SizeOverflow(t) => t.hash_stable(hcx, hasher)
        }
    }
}