1 // Copyright 2016 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
11 use session::{self, DataTypeKind};
12 use ty::{self, Ty, TyCtxt, TypeFoldable, ReprOptions};
14 use syntax::ast::{self, IntTy, UintTy};
16 use syntax_pos::DUMMY_SP;
24 use ich::StableHashingContext;
use rustc_data_structures::stable_hasher::{HashStable, StableHasher,
                                           StableHasherResult};
28 pub use rustc_target::abi::*;
30 pub trait IntegerExt {
31 fn to_ty<'a, 'tcx>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, signed: bool) -> Ty<'tcx>;
32 fn from_attr<C: HasDataLayout>(cx: C, ity: attr::IntType) -> Integer;
33 fn repr_discr<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
41 impl IntegerExt for Integer {
42 fn to_ty<'a, 'tcx>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, signed: bool) -> Ty<'tcx> {
43 match (*self, signed) {
44 (I8, false) => tcx.types.u8,
45 (I16, false) => tcx.types.u16,
46 (I32, false) => tcx.types.u32,
47 (I64, false) => tcx.types.u64,
48 (I128, false) => tcx.types.u128,
49 (I8, true) => tcx.types.i8,
50 (I16, true) => tcx.types.i16,
51 (I32, true) => tcx.types.i32,
52 (I64, true) => tcx.types.i64,
53 (I128, true) => tcx.types.i128,
57 /// Get the Integer type from an attr::IntType.
58 fn from_attr<C: HasDataLayout>(cx: C, ity: attr::IntType) -> Integer {
59 let dl = cx.data_layout();
62 attr::SignedInt(IntTy::I8) | attr::UnsignedInt(UintTy::U8) => I8,
63 attr::SignedInt(IntTy::I16) | attr::UnsignedInt(UintTy::U16) => I16,
64 attr::SignedInt(IntTy::I32) | attr::UnsignedInt(UintTy::U32) => I32,
65 attr::SignedInt(IntTy::I64) | attr::UnsignedInt(UintTy::U64) => I64,
66 attr::SignedInt(IntTy::I128) | attr::UnsignedInt(UintTy::U128) => I128,
67 attr::SignedInt(IntTy::Isize) | attr::UnsignedInt(UintTy::Usize) => {
68 dl.ptr_sized_integer()
73 /// Find the appropriate Integer type and signedness for the given
74 /// signed discriminant range and #[repr] attribute.
75 /// N.B.: u128 values above i128::MAX will be treated as signed, but
76 /// that shouldn't affect anything, other than maybe debuginfo.
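///
/// Illustrative example (not part of the original docs): for discriminants
/// spanning -1..=100, `fit_signed` yields I8 for both bounds, while
/// `fit_unsigned(max(-1i128 as u128, 100))` degenerates to I128; since the
/// range contains a negative value the signed fit is used, and absent a
/// `#[repr]` hint or an extern-imposed minimum the result is `(I8, true)`.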
77 fn repr_discr<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
83 // Theoretically, negative values could be larger in unsigned representation
84 // than the unsigned representation of the signed minimum. However, if there
85 // are any negative values, the only valid unsigned representation is u128
86 // which can fit all i128 values, so the result remains unaffected.
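// For instance (hypothetical values): with min == -1, `min as u128` is
// u128::MAX, so `fit_unsigned` below returns I128; that is harmless because
// a range containing negatives ends up using `signed_fit` anyway.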
87 let unsigned_fit = Integer::fit_unsigned(cmp::max(min as u128, max as u128));
88 let signed_fit = cmp::max(Integer::fit_signed(min), Integer::fit_signed(max));
90 let mut min_from_extern = None;
93 if let Some(ity) = repr.int {
94 let discr = Integer::from_attr(tcx, ity);
95 let fit = if ity.is_signed() { signed_fit } else { unsigned_fit };
97 bug!("Integer::repr_discr: `#[repr]` hint too small for \
98 discriminant range of enum `{}", ty)
100 return (discr, ity.is_signed());
104 match &tcx.sess.target.target.arch[..] {
105 // WARNING: the ARM EABI has two variants; the one corresponding
106 // to `at_least == I32` appears to be used on Linux and NetBSD,
107 // but some systems may use the variant corresponding to no
108 // lower bound. However, we don't run on those yet...?
109 "arm" => min_from_extern = Some(I32),
110 _ => min_from_extern = Some(I32),
114 let at_least = min_from_extern.unwrap_or(min_default);
116 // If there are no negative values, we can use the unsigned fit.
118 (cmp::max(unsigned_fit, at_least), false)
120 (cmp::max(signed_fit, at_least), true)
125 pub trait PrimitiveExt {
126 fn to_ty<'a, 'tcx>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>) -> Ty<'tcx>;
129 impl PrimitiveExt for Primitive {
130 fn to_ty<'a, 'tcx>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>) -> Ty<'tcx> {
132 Int(i, signed) => i.to_ty(tcx, signed),
133 Float(FloatTy::F32) => tcx.types.f32,
134 Float(FloatTy::F64) => tcx.types.f64,
135 Pointer => tcx.mk_mut_ptr(tcx.mk_nil()),
140 /// The first half of a fat pointer.
142 /// - For a trait object, this is the address of the box.
143 /// - For a slice, this is the base address.
144 pub const FAT_PTR_ADDR: usize = 0;
146 /// The second half of a fat pointer.
148 /// - For a trait object, this is the address of the vtable.
149 /// - For a slice, this is the length.
150 pub const FAT_PTR_EXTRA: usize = 1;
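// Worked example (illustrative): for `&[u8]` the two halves are
// (data pointer, length), so field FAT_PTR_ADDR is the base address and
// field FAT_PTR_EXTRA is the element count; for `&dyn Trait` they are
// (data pointer, vtable pointer) instead.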
152 #[derive(Copy, Clone, Debug, RustcEncodable, RustcDecodable)]
153 pub enum LayoutError<'tcx> {
155 SizeOverflow(Ty<'tcx>)
158 impl<'tcx> fmt::Display for LayoutError<'tcx> {
159 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
161 LayoutError::Unknown(ty) => {
162 write!(f, "the type `{:?}` has an unknown layout", ty)
164 LayoutError::SizeOverflow(ty) => {
165 write!(f, "the type `{:?}` is too big for the current architecture", ty)
171 fn layout_raw<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
172 query: ty::ParamEnvAnd<'tcx, Ty<'tcx>>)
173 -> Result<&'tcx LayoutDetails, LayoutError<'tcx>>
175 ty::tls::with_related_context(tcx, move |icx| {
176 let rec_limit = *tcx.sess.recursion_limit.get();
177 let (param_env, ty) = query.into_parts();
179 if icx.layout_depth > rec_limit {
181 &format!("overflow representing the type `{}`", ty));
184 // Update the ImplicitCtxt to increase the layout_depth
185 let icx = ty::tls::ImplicitCtxt {
186 layout_depth: icx.layout_depth + 1,
190 ty::tls::enter_context(&icx, |_| {
191 let cx = LayoutCx { tcx, param_env };
192 cx.layout_raw_uncached(ty)
197 pub fn provide(providers: &mut ty::query::Providers) {
198 *providers = ty::query::Providers {
204 #[derive(Copy, Clone)]
205 pub struct LayoutCx<'tcx, C> {
207 pub param_env: ty::ParamEnv<'tcx>
210 impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> {
211 fn layout_raw_uncached(self, ty: Ty<'tcx>)
212 -> Result<&'tcx LayoutDetails, LayoutError<'tcx>> {
214 let param_env = self.param_env;
215 let dl = self.data_layout();
216 let scalar_unit = |value: Primitive| {
217 let bits = value.size(dl).bits();
218 assert!(bits <= 128);
221 valid_range: 0..=(!0 >> (128 - bits))
224 let scalar = |value: Primitive| {
225 tcx.intern_layout(LayoutDetails::scalar(self, scalar_unit(value)))
227 let scalar_pair = |a: Scalar, b: Scalar| {
228 let align = a.value.align(dl).max(b.value.align(dl)).max(dl.aggregate_align);
229 let b_offset = a.value.size(dl).abi_align(b.value.align(dl));
230 let size = (b_offset + b.value.size(dl)).abi_align(align);
232 variants: Variants::Single { index: 0 },
233 fields: FieldPlacement::Arbitrary {
234 offsets: vec![Size::ZERO, b_offset],
235 memory_index: vec![0, 1]
237 abi: Abi::ScalarPair(a, b),
243 #[derive(Copy, Clone, Debug)]
245 /// A tuple, closure, or univariant which cannot be coerced to unsized.
247 /// A univariant, the last field of which may be coerced to unsized.
249 /// A univariant, but with a prefix of an arbitrary size & alignment (e.g. enum tag).
250 Prefixed(Size, Align),
252 let univariant_uninterned = |fields: &[TyLayout], repr: &ReprOptions, kind| {
253 let packed = repr.packed();
254 if packed && repr.align > 0 {
255 bug!("struct cannot be packed and aligned");
259 let pack = repr.pack as u64;
260 Align::from_bytes(pack, pack).unwrap()
263 let mut align = if packed {
269 let mut sized = true;
270 let mut offsets = vec![Size::ZERO; fields.len()];
271 let mut inverse_memory_index: Vec<u32> = (0..fields.len() as u32).collect();
273 let mut optimize = !repr.inhibit_struct_field_reordering_opt();
274 if let StructKind::Prefixed(_, align) = kind {
275 optimize &= align.abi() == 1;
279 let end = if let StructKind::MaybeUnsized = kind {
284 let optimizing = &mut inverse_memory_index[..end];
285 let field_align = |f: &TyLayout| {
286 if packed { f.align.min(pack).abi() } else { f.align.abi() }
289 StructKind::AlwaysSized |
290 StructKind::MaybeUnsized => {
291 optimizing.sort_by_key(|&x| {
292 // Place ZSTs first to avoid "interesting offsets",
293 // especially with only one or two non-ZST fields.
294 let f = &fields[x as usize];
295 (!f.is_zst(), cmp::Reverse(field_align(f)))
298 StructKind::Prefixed(..) => {
299 optimizing.sort_by_key(|&x| field_align(&fields[x as usize]));
304 // inverse_memory_index holds field indices by increasing memory offset.
305 // That is, if field 5 has offset 0, the first element of inverse_memory_index is 5.
306 // We now write field offsets to the corresponding offset slot;
307 // field 5 with offset 0 puts 0 in offsets[5].
308 // At the bottom of this function, we use inverse_memory_index to produce memory_index.
310 let mut offset = Size::ZERO;
312 if let StructKind::Prefixed(prefix_size, prefix_align) = kind {
314 let prefix_align = prefix_align.min(pack);
315 align = align.max(prefix_align);
317 align = align.max(prefix_align);
319 offset = prefix_size.abi_align(prefix_align);
322 for &i in &inverse_memory_index {
323 let field = fields[i as usize];
325 bug!("univariant: field #{} of `{}` comes after unsized field",
329 if field.is_unsized() {
333 // Invariant: offset < dl.obj_size_bound() <= 1<<61
335 let field_pack = field.align.min(pack);
336 offset = offset.abi_align(field_pack);
337 align = align.max(field_pack);
340 offset = offset.abi_align(field.align);
341 align = align.max(field.align);
344 debug!("univariant offset: {:?} field: {:#?}", offset, field);
345 offsets[i as usize] = offset;
347 offset = offset.checked_add(field.size, dl)
348 .ok_or(LayoutError::SizeOverflow(ty))?;
352 let repr_align = repr.align as u64;
353 align = align.max(Align::from_bytes(repr_align, repr_align).unwrap());
354 debug!("univariant repr_align: {:?}", repr_align);
357 debug!("univariant min_size: {:?}", offset);
358 let min_size = offset;
360 // As stated above, inverse_memory_index holds field indices by increasing offset.
361 // This makes it an already-sorted view of the offsets vec.
// To invert it, consider:
// If field 5 has offset 0, inverse_memory_index[0] is 5, and memory_index[5] should be 0.
// Field 5 is first in memory, so the loop below stores i at memory_index[inverse_memory_index[i]].
// Note: if we didn't optimize, inverse_memory_index is still the identity and can be used directly.
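// A small worked example (hypothetical field order): if the optimizer placed
// field 2 first, then field 0, then field 1, inverse_memory_index is [2, 0, 1];
// the loop below produces memory_index = [1, 2, 0], i.e. field 0 is second in
// memory, field 1 is third, and field 2 is first.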
367 let mut memory_index;
369 memory_index = vec![0; inverse_memory_index.len()];
371 for i in 0..inverse_memory_index.len() {
372 memory_index[inverse_memory_index[i] as usize] = i as u32;
375 memory_index = inverse_memory_index;
378 let size = min_size.abi_align(align);
379 let mut abi = Abi::Aggregate { sized };
381 // Unpack newtype ABIs and find scalar pairs.
382 if sized && size.bytes() > 0 {
383 // All other fields must be ZSTs, and we need them to all start at 0.
384 let mut zst_offsets =
385 offsets.iter().enumerate().filter(|&(i, _)| fields[i].is_zst());
386 if zst_offsets.all(|(_, o)| o.bytes() == 0) {
387 let mut non_zst_fields =
388 fields.iter().enumerate().filter(|&(_, f)| !f.is_zst());
390 match (non_zst_fields.next(), non_zst_fields.next(), non_zst_fields.next()) {
391 // We have exactly one non-ZST field.
392 (Some((i, field)), None, None) => {
393 // Field fills the struct and it has a scalar or scalar pair ABI.
394 if offsets[i].bytes() == 0 &&
395 align.abi() == field.align.abi() &&
398 // For plain scalars, or vectors of them, we can't unpack
399 // newtypes for `#[repr(C)]`, as that affects C ABIs.
400 Abi::Scalar(_) | Abi::Vector { .. } if optimize => {
401 abi = field.abi.clone();
403 // But scalar pairs are Rust-specific and get
404 // treated as aggregates by C ABIs anyway.
405 Abi::ScalarPair(..) => {
406 abi = field.abi.clone();
413 // Two non-ZST fields, and they're both scalars.
414 (Some((i, &TyLayout {
415 details: &LayoutDetails { abi: Abi::Scalar(ref a), .. }, ..
416 })), Some((j, &TyLayout {
417 details: &LayoutDetails { abi: Abi::Scalar(ref b), .. }, ..
419 // Order by the memory placement, not source order.
420 let ((i, a), (j, b)) = if offsets[i] < offsets[j] {
425 let pair = scalar_pair(a.clone(), b.clone());
426 let pair_offsets = match pair.fields {
427 FieldPlacement::Arbitrary {
431 assert_eq!(memory_index, &[0, 1]);
436 if offsets[i] == pair_offsets[0] &&
437 offsets[j] == pair_offsets[1] &&
438 align == pair.align &&
440 // We can use `ScalarPair` only when it matches our
441 // already computed layout (including `#[repr(C)]`).
451 if sized && fields.iter().any(|f| f.abi == Abi::Uninhabited) {
452 abi = Abi::Uninhabited;
456 variants: Variants::Single { index: 0 },
457 fields: FieldPlacement::Arbitrary {
466 let univariant = |fields: &[TyLayout], repr: &ReprOptions, kind| {
467 Ok(tcx.intern_layout(univariant_uninterned(fields, repr, kind)?))
469 debug_assert!(!ty.has_infer_types());
474 tcx.intern_layout(LayoutDetails::scalar(self, Scalar {
475 value: Int(I8, false),
480 tcx.intern_layout(LayoutDetails::scalar(self, Scalar {
481 value: Int(I32, false),
482 valid_range: 0..=0x10FFFF
486 scalar(Int(Integer::from_attr(dl, attr::SignedInt(ity)), true))
489 scalar(Int(Integer::from_attr(dl, attr::UnsignedInt(ity)), false))
491 ty::TyFloat(fty) => scalar(Float(fty)),
493 let mut ptr = scalar_unit(Pointer);
494 ptr.valid_range = 1..=*ptr.valid_range.end();
495 tcx.intern_layout(LayoutDetails::scalar(self, ptr))
500 tcx.intern_layout(LayoutDetails {
501 variants: Variants::Single { index: 0 },
502 fields: FieldPlacement::Union(0),
503 abi: Abi::Uninhabited,
509 // Potentially-fat pointers.
510 ty::TyRef(_, pointee, _) |
511 ty::TyRawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
512 let mut data_ptr = scalar_unit(Pointer);
513 if !ty.is_unsafe_ptr() {
514 data_ptr.valid_range = 1..=*data_ptr.valid_range.end();
517 let pointee = tcx.normalize_erasing_regions(param_env, pointee);
518 if pointee.is_sized(tcx.at(DUMMY_SP), param_env) {
519 return Ok(tcx.intern_layout(LayoutDetails::scalar(self, data_ptr)));
522 let unsized_part = tcx.struct_tail(pointee);
523 let metadata = match unsized_part.sty {
524 ty::TyForeign(..) => {
525 return Ok(tcx.intern_layout(LayoutDetails::scalar(self, data_ptr)));
527 ty::TySlice(_) | ty::TyStr => {
528 scalar_unit(Int(dl.ptr_sized_integer(), false))
530 ty::TyDynamic(..) => {
531 let mut vtable = scalar_unit(Pointer);
532 vtable.valid_range = 1..=*vtable.valid_range.end();
535 _ => return Err(LayoutError::Unknown(unsized_part))
538 // Effectively a (ptr, meta) tuple.
539 tcx.intern_layout(scalar_pair(data_ptr, metadata))
542 // Arrays and slices.
543 ty::TyArray(element, mut count) => {
544 if count.has_projections() {
545 count = tcx.normalize_erasing_regions(param_env, count);
546 if count.has_projections() {
547 return Err(LayoutError::Unknown(ty));
551 let element = self.layout_of(element)?;
552 let count = count.unwrap_usize(tcx);
553 let size = element.size.checked_mul(count, dl)
554 .ok_or(LayoutError::SizeOverflow(ty))?;
556 tcx.intern_layout(LayoutDetails {
557 variants: Variants::Single { index: 0 },
558 fields: FieldPlacement::Array {
559 stride: element.size,
562 abi: Abi::Aggregate { sized: true },
563 align: element.align,
567 ty::TySlice(element) => {
568 let element = self.layout_of(element)?;
569 tcx.intern_layout(LayoutDetails {
570 variants: Variants::Single { index: 0 },
571 fields: FieldPlacement::Array {
572 stride: element.size,
575 abi: Abi::Aggregate { sized: false },
576 align: element.align,
581 tcx.intern_layout(LayoutDetails {
582 variants: Variants::Single { index: 0 },
583 fields: FieldPlacement::Array {
584 stride: Size::from_bytes(1),
587 abi: Abi::Aggregate { sized: false },
595 univariant(&[], &ReprOptions::default(), StructKind::AlwaysSized)?
597 ty::TyDynamic(..) | ty::TyForeign(..) => {
598 let mut unit = univariant_uninterned(&[], &ReprOptions::default(),
599 StructKind::AlwaysSized)?;
601 Abi::Aggregate { ref mut sized } => *sized = false,
604 tcx.intern_layout(unit)
607 // Tuples, generators and closures.
608 ty::TyGenerator(def_id, ref substs, _) => {
609 let tys = substs.field_tys(def_id, tcx);
610 univariant(&tys.map(|ty| self.layout_of(ty)).collect::<Result<Vec<_>, _>>()?,
611 &ReprOptions::default(),
612 StructKind::AlwaysSized)?
615 ty::TyClosure(def_id, ref substs) => {
616 let tys = substs.upvar_tys(def_id, tcx);
617 univariant(&tys.map(|ty| self.layout_of(ty)).collect::<Result<Vec<_>, _>>()?,
618 &ReprOptions::default(),
619 StructKind::AlwaysSized)?
622 ty::TyTuple(tys) => {
623 let kind = if tys.len() == 0 {
624 StructKind::AlwaysSized
626 StructKind::MaybeUnsized
629 univariant(&tys.iter().map(|ty| self.layout_of(ty)).collect::<Result<Vec<_>, _>>()?,
630 &ReprOptions::default(), kind)?
633 // SIMD vector types.
634 ty::TyAdt(def, ..) if def.repr.simd() => {
635 let element = self.layout_of(ty.simd_type(tcx))?;
636 let count = ty.simd_size(tcx) as u64;
638 let scalar = match element.abi {
639 Abi::Scalar(ref scalar) => scalar.clone(),
641 tcx.sess.fatal(&format!("monomorphising SIMD type `{}` with \
642 a non-machine element type `{}`",
646 let size = element.size.checked_mul(count, dl)
647 .ok_or(LayoutError::SizeOverflow(ty))?;
648 let align = dl.vector_align(size);
649 let size = size.abi_align(align);
651 tcx.intern_layout(LayoutDetails {
652 variants: Variants::Single { index: 0 },
653 fields: FieldPlacement::Array {
654 stride: element.size,
667 ty::TyAdt(def, substs) => {
668 // Cache the field layouts.
669 let variants = def.variants.iter().map(|v| {
670 v.fields.iter().map(|field| {
671 self.layout_of(field.ty(tcx, substs))
672 }).collect::<Result<Vec<_>, _>>()
673 }).collect::<Result<Vec<_>, _>>()?;
676 let packed = def.repr.packed();
677 if packed && def.repr.align > 0 {
678 bug!("Union cannot be packed and aligned");
682 let pack = def.repr.pack as u64;
683 Align::from_bytes(pack, pack).unwrap()
686 let mut align = if packed {
692 if def.repr.align > 0 {
693 let repr_align = def.repr.align as u64;
695 Align::from_bytes(repr_align, repr_align).unwrap());
698 let mut size = Size::ZERO;
699 for field in &variants[0] {
700 assert!(!field.is_unsized());
703 let field_pack = field.align.min(pack);
704 align = align.max(field_pack);
706 align = align.max(field.align);
708 size = cmp::max(size, field.size);
711 return Ok(tcx.intern_layout(LayoutDetails {
712 variants: Variants::Single { index: 0 },
713 fields: FieldPlacement::Union(variants[0].len()),
714 abi: Abi::Aggregate { sized: true },
716 size: size.abi_align(align)
720 // A variant is absent if it's uninhabited and only has ZST fields.
721 // Present uninhabited variants only require space for their fields,
722 // but *not* an encoding of the discriminant (e.g. a tag value).
723 // See issue #49298 for more details on the need to leave space
724 // for non-ZST uninhabited data (mostly partial initialization).
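// For example (illustrative): in `enum E { A(!), B(u32, !) }`, variant `A` is
// absent (uninhabited with only ZST fields), while `B` is uninhabited but
// *present*, since its `u32` field still needs space.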
725 let absent = |fields: &[TyLayout]| {
726 let uninhabited = fields.iter().any(|f| f.abi == Abi::Uninhabited);
727 let is_zst = fields.iter().all(|f| f.is_zst());
728 uninhabited && is_zst
730 let (present_first, present_second) = {
731 let mut present_variants = (0..variants.len()).filter(|&v| {
732 !absent(&variants[v])
734 (present_variants.next(), present_variants.next())
736 if present_first.is_none() {
737 // Uninhabited because it has no variants, or only absent ones.
738 return tcx.layout_raw(param_env.and(tcx.types.never));
741 let is_struct = !def.is_enum() ||
742 // Only one variant is present.
743 (present_second.is_none() &&
744 // Representation optimizations are allowed.
745 !def.repr.inhibit_enum_layout_opt());
747 // Struct, or univariant enum equivalent to a struct.
748 // (Typechecking will reject discriminant-sizing attrs.)
750 let v = present_first.unwrap();
751 let kind = if def.is_enum() || variants[v].len() == 0 {
752 StructKind::AlwaysSized
754 let param_env = tcx.param_env(def.did);
755 let last_field = def.variants[v].fields.last().unwrap();
756 let always_sized = tcx.type_of(last_field.did)
757 .is_sized(tcx.at(DUMMY_SP), param_env);
758 if !always_sized { StructKind::MaybeUnsized }
759 else { StructKind::AlwaysSized }
762 let mut st = univariant_uninterned(&variants[v], &def.repr, kind)?;
763 st.variants = Variants::Single { index: v };
764 // Exclude 0 from the range of a newtype ABI NonZero<T>.
765 if Some(def.did) == self.tcx.lang_items().non_zero() {
767 Abi::Scalar(ref mut scalar) |
768 Abi::ScalarPair(ref mut scalar, _) => {
769 if *scalar.valid_range.start() == 0 {
770 scalar.valid_range = 1..=*scalar.valid_range.end();
776 return Ok(tcx.intern_layout(st));
779 // The current code for niche-filling relies on variant indices
780 // instead of actual discriminants, so dataful enums with
781 // explicit discriminants (RFC #2363) would misbehave.
782 let no_explicit_discriminants = def.variants.iter().enumerate()
783 .all(|(i, v)| v.discr == ty::VariantDiscr::Relative(i));
785 // Niche-filling enum optimization.
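// The classic example of this optimization (illustrative): `Option<&T>` has
// one dataful variant (`Some`) and one niche variant (`None`); since 0 is not
// a valid value for `&T`, `None` is encoded as the all-zero bit pattern and
// no separate tag is needed.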
786 if !def.repr.inhibit_enum_layout_opt() && no_explicit_discriminants {
787 let mut dataful_variant = None;
788 let mut niche_variants = usize::max_value()..=0;
790 // Find one non-ZST variant.
791 'variants: for (v, fields) in variants.iter().enumerate() {
797 if dataful_variant.is_none() {
798 dataful_variant = Some(v);
801 dataful_variant = None;
806 niche_variants = *niche_variants.start().min(&v)..=v;
809 if niche_variants.start() > niche_variants.end() {
810 dataful_variant = None;
813 if let Some(i) = dataful_variant {
814 let count = (niche_variants.end() - niche_variants.start() + 1) as u128;
815 for (field_index, &field) in variants[i].iter().enumerate() {
816 let niche = match self.find_niche(field)? {
817 Some(niche) => niche,
820 let (niche_start, niche_scalar) = match niche.reserve(self, count) {
825 let mut align = dl.aggregate_align;
826 let st = variants.iter().enumerate().map(|(j, v)| {
827 let mut st = univariant_uninterned(v,
828 &def.repr, StructKind::AlwaysSized)?;
829 st.variants = Variants::Single { index: j };
831 align = align.max(st.align);
834 }).collect::<Result<Vec<_>, _>>()?;
836 let offset = st[i].fields.offset(field_index) + niche.offset;
837 let size = st[i].size;
839 let mut abi = match st[i].abi {
840 Abi::Scalar(_) => Abi::Scalar(niche_scalar.clone()),
841 Abi::ScalarPair(ref first, ref second) => {
// We need to use scalar_unit to reset the
// valid range to the maximal one for that
// primitive, because only the niche is
// guaranteed to be initialised, not the
// other primitive.
847 if offset.bytes() == 0 {
849 niche_scalar.clone(),
850 scalar_unit(second.value),
854 scalar_unit(first.value),
855 niche_scalar.clone(),
859 _ => Abi::Aggregate { sized: true },
862 if st.iter().all(|v| v.abi == Abi::Uninhabited) {
863 abi = Abi::Uninhabited;
866 return Ok(tcx.intern_layout(LayoutDetails {
867 variants: Variants::NicheFilling {
874 fields: FieldPlacement::Arbitrary {
875 offsets: vec![offset],
876 memory_index: vec![0]
886 let (mut min, mut max) = (i128::max_value(), i128::min_value());
887 let discr_type = def.repr.discr_type();
888 let bits = Integer::from_attr(tcx, discr_type).size().bits();
889 for (i, discr) in def.discriminants(tcx).enumerate() {
890 if variants[i].iter().any(|f| f.abi == Abi::Uninhabited) {
893 let mut x = discr.val as i128;
894 if discr_type.is_signed() {
895 // sign extend the raw representation to be an i128
896 x = (x << (128 - bits)) >> (128 - bits);
898 if x < min { min = x; }
899 if x > max { max = x; }
901 // We might have no inhabited variants, so pretend there's at least one.
902 if (min, max) == (i128::max_value(), i128::min_value()) {
906 assert!(min <= max, "discriminant range is {}...{}", min, max);
907 let (min_ity, signed) = Integer::repr_discr(tcx, ty, &def.repr, min, max);
909 let mut align = dl.aggregate_align;
910 let mut size = Size::ZERO;
912 // We're interested in the smallest alignment, so start large.
913 let mut start_align = Align::from_bytes(256, 256).unwrap();
914 assert_eq!(Integer::for_abi_align(dl, start_align), None);
916 // repr(C) on an enum tells us to make a (tag, union) layout,
917 // so we need to grow the prefix alignment to be at least
918 // the alignment of the union. (This value is used both for
// determining the alignment of the overall enum, and for
// determining the alignment of the payload after the tag.)
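// E.g. (illustrative): `#[repr(C)] enum E { A(u8), B(u64) }` is laid out as
// (tag, union of the variant payloads), so on typical targets the prefix
// alignment is grown to the union's 8-byte alignment no matter how small the
// tag itself is.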
921 let mut prefix_align = min_ity.align(dl);
923 for fields in &variants {
924 for field in fields {
925 prefix_align = prefix_align.max(field.align);
930 // Create the set of structs that represent each variant.
931 let mut layout_variants = variants.iter().enumerate().map(|(i, field_layouts)| {
932 let mut st = univariant_uninterned(&field_layouts,
933 &def.repr, StructKind::Prefixed(min_ity.size(), prefix_align))?;
934 st.variants = Variants::Single { index: i };
935 // Find the first field we can't move later
936 // to make room for a larger discriminant.
937 for field in st.fields.index_by_increasing_offset().map(|j| field_layouts[j]) {
938 if !field.is_zst() || field.align.abi() != 1 {
939 start_align = start_align.min(field.align);
943 size = cmp::max(size, st.size);
944 align = align.max(st.align);
946 }).collect::<Result<Vec<_>, _>>()?;
948 // Align the maximum variant size to the largest alignment.
949 size = size.abi_align(align);
951 if size.bytes() >= dl.obj_size_bound() {
952 return Err(LayoutError::SizeOverflow(ty));
955 let typeck_ity = Integer::from_attr(dl, def.repr.discr_type());
956 if typeck_ity < min_ity {
// It is a bug if, at this point, layout decided on a larger discriminant size than
// typeck did (based on the values the discriminant can take): the discriminant will
// be loaded and then stored into a variable of the type computed by typeck. Consider
// such a (buggy) case: typeck decided on a byte-sized discriminant, but layout thinks
// we need 16 bits to store all the discriminant values. Then, in codegen, storing
// that 16-bit discriminant into an 8-bit temporary would have to discard some of the
// bits needed to represent it (or layout is wrong in thinking it needs 16 bits).
966 bug!("layout decided on a larger discriminant type ({:?}) than typeck ({:?})",
967 min_ity, typeck_ity);
// However, it is fine to make the discriminant type larger than typeck's (as an
// optimisation) after this point; we'll just truncate the value we load in codegen.
972 // Check to see if we should use a different type for the
973 // discriminant. We can safely use a type with the same size
974 // as the alignment of the first field of each variant.
975 // We increase the size of the discriminant to avoid LLVM copying
976 // padding when it doesn't need to. This normally causes unaligned
977 // load/stores and excessive memcpy/memset operations. By using a
978 // bigger integer size, LLVM can be sure about its contents and
979 // won't be so conservative.
981 // Use the initial field alignment
982 let mut ity = if def.repr.c() || def.repr.int.is_some() {
985 Integer::for_abi_align(dl, start_align).unwrap_or(min_ity)
988 // If the alignment is not larger than the chosen discriminant size,
989 // don't use the alignment as the final size.
993 // Patch up the variants' first few fields.
994 let old_ity_size = min_ity.size();
995 let new_ity_size = ity.size();
996 for variant in &mut layout_variants {
997 match variant.fields {
998 FieldPlacement::Arbitrary { ref mut offsets, .. } => {
1000 if *i <= old_ity_size {
1001 assert_eq!(*i, old_ity_size);
1005 // We might be making the struct larger.
1006 if variant.size <= old_ity_size {
1007 variant.size = new_ity_size;
1015 let tag_mask = !0u128 >> (128 - ity.size().bits());
1017 value: Int(ity, signed),
1018 valid_range: (min as u128 & tag_mask)..=(max as u128 & tag_mask),
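// E.g. (illustrative): an i8 tag covering discriminants -1..=5 gives
// tag_mask == 0xff and a wrapping valid_range of 0xff..=5, which still
// excludes the unused values 6..=254.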
1020 let mut abi = Abi::Aggregate { sized: true };
1021 if tag.value.size(dl) == size {
1022 abi = Abi::Scalar(tag.clone());
1024 // Try to use a ScalarPair for all tagged enums.
1025 let mut common_prim = None;
1026 for (field_layouts, layout_variant) in variants.iter().zip(&layout_variants) {
1027 let offsets = match layout_variant.fields {
1028 FieldPlacement::Arbitrary { ref offsets, .. } => offsets,
1031 let mut fields = field_layouts
1034 .filter(|p| !p.0.is_zst());
1035 let (field, offset) = match (fields.next(), fields.next()) {
1036 (None, None) => continue,
1037 (Some(pair), None) => pair,
1043 let prim = match field.details.abi {
1044 Abi::Scalar(ref scalar) => scalar.value,
1050 if let Some(pair) = common_prim {
1051 // This is pretty conservative. We could go fancier
1052 // by conflating things like i32 and u32, or even
// realising that (u8, u8) could just cohabit with
// u16 or even u32.
1055 if pair != (prim, offset) {
1060 common_prim = Some((prim, offset));
1063 if let Some((prim, offset)) = common_prim {
1064 let pair = scalar_pair(tag.clone(), scalar_unit(prim));
1065 let pair_offsets = match pair.fields {
1066 FieldPlacement::Arbitrary {
1070 assert_eq!(memory_index, &[0, 1]);
1075 if pair_offsets[0] == Size::ZERO &&
1076 pair_offsets[1] == *offset &&
1077 align == pair.align &&
1079 // We can use `ScalarPair` only when it matches our
1080 // already computed layout (including `#[repr(C)]`).
1086 if layout_variants.iter().all(|v| v.abi == Abi::Uninhabited) {
1087 abi = Abi::Uninhabited;
1090 tcx.intern_layout(LayoutDetails {
1091 variants: Variants::Tagged {
1093 variants: layout_variants,
1095 fields: FieldPlacement::Arbitrary {
1096 offsets: vec![Size::ZERO],
1097 memory_index: vec![0]
1105 // Types with no meaningful known layout.
1106 ty::TyProjection(_) | ty::TyAnon(..) => {
1107 let normalized = tcx.normalize_erasing_regions(param_env, ty);
1108 if ty == normalized {
1109 return Err(LayoutError::Unknown(ty));
1111 tcx.layout_raw(param_env.and(normalized))?
1113 ty::TyGeneratorWitness(..) | ty::TyInfer(_) => {
1114 bug!("LayoutDetails::compute: unexpected type `{}`", ty)
1116 ty::TyParam(_) | ty::TyError => {
1117 return Err(LayoutError::Unknown(ty));
1122 /// This is invoked by the `layout_raw` query to record the final
1123 /// layout of each type.
1125 fn record_layout_for_printing(self, layout: TyLayout<'tcx>) {
1126 // If we are running with `-Zprint-type-sizes`, record layouts for
1127 // dumping later. Ignore layouts that are done with non-empty
1128 // environments or non-monomorphic layouts, as the user only wants
1129 // to see the stuff resulting from the final codegen session.
1131 !self.tcx.sess.opts.debugging_opts.print_type_sizes ||
1132 layout.ty.has_param_types() ||
1133 layout.ty.has_self_ty() ||
1134 !self.param_env.caller_bounds.is_empty()
1139 self.record_layout_for_printing_outlined(layout)
1142 fn record_layout_for_printing_outlined(self, layout: TyLayout<'tcx>) {
1143 // (delay format until we actually need it)
1144 let record = |kind, packed, opt_discr_size, variants| {
1145 let type_desc = format!("{:?}", layout.ty);
1146 self.tcx.sess.code_stats.borrow_mut().record_type_size(kind,
1155 let adt_def = match layout.ty.sty {
1156 ty::TyAdt(ref adt_def, _) => {
1157 debug!("print-type-size t: `{:?}` process adt", layout.ty);
1161 ty::TyClosure(..) => {
1162 debug!("print-type-size t: `{:?}` record closure", layout.ty);
1163 record(DataTypeKind::Closure, false, None, vec![]);
1168 debug!("print-type-size t: `{:?}` skip non-nominal", layout.ty);
1173 let adt_kind = adt_def.adt_kind();
1174 let adt_packed = adt_def.repr.packed();
1176 let build_variant_info = |n: Option<ast::Name>,
1178 layout: TyLayout<'tcx>| {
1179 let mut min_size = Size::ZERO;
1180 let field_info: Vec<_> = flds.iter().enumerate().map(|(i, &name)| {
1181 match layout.field(self, i) {
1183 bug!("no layout found for field {}: `{:?}`", name, err);
1185 Ok(field_layout) => {
1186 let offset = layout.fields.offset(i);
1187 let field_end = offset + field_layout.size;
1188 if min_size < field_end {
1189 min_size = field_end;
1191 session::FieldInfo {
1192 name: name.to_string(),
1193 offset: offset.bytes(),
1194 size: field_layout.size.bytes(),
1195 align: field_layout.align.abi(),
1201 session::VariantInfo {
name: n.map(|n| n.to_string()),
1203 kind: if layout.is_unsized() {
1204 session::SizeKind::Min
1206 session::SizeKind::Exact
1208 align: layout.align.abi(),
1209 size: if min_size.bytes() == 0 {
1218 match layout.variants {
1219 Variants::Single { index } => {
1220 debug!("print-type-size `{:#?}` variant {}",
1221 layout, adt_def.variants[index].name);
1222 if !adt_def.variants.is_empty() {
1223 let variant_def = &adt_def.variants[index];
1224 let fields: Vec<_> =
1225 variant_def.fields.iter().map(|f| f.ident.name).collect();
1226 record(adt_kind.into(),
1229 vec![build_variant_info(Some(variant_def.name),
// (This case arises for *empty* enums; so give it
// zero variants.)
1235 record(adt_kind.into(), adt_packed, None, vec![]);
1239 Variants::NicheFilling { .. } |
1240 Variants::Tagged { .. } => {
1241 debug!("print-type-size `{:#?}` adt general variants def {}",
1242 layout.ty, adt_def.variants.len());
1243 let variant_infos: Vec<_> =
1244 adt_def.variants.iter().enumerate().map(|(i, variant_def)| {
1245 let fields: Vec<_> =
1246 variant_def.fields.iter().map(|f| f.ident.name).collect();
1247 build_variant_info(Some(variant_def.name),
1249 layout.for_variant(self, i))
1252 record(adt_kind.into(), adt_packed, match layout.variants {
1253 Variants::Tagged { ref tag, .. } => Some(tag.value.size(self)),
1261 /// Type size "skeleton", i.e. the only information determining a type's size.
1262 /// While this is conservative, (aside from constant sizes, only pointers,
1263 /// newtypes thereof and null pointer optimized enums are allowed), it is
1264 /// enough to statically check common usecases of transmute.
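///
/// For instance (illustrative): with `T` a type parameter not known to be
/// `Sized`, no concrete layout for `&T` can be computed, but both `&T` and
/// `Option<&T>` still reduce to `SizeSkeleton::Pointer` with the same tail
/// `T`, so a transmute between them can be accepted as size-preserving.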
1265 #[derive(Copy, Clone, Debug)]
1266 pub enum SizeSkeleton<'tcx> {
1267 /// Any statically computable Layout.
1270 /// A potentially-fat pointer.
1272 /// If true, this pointer is never null.
1274 /// The type which determines the unsized metadata, if any,
1275 /// of this pointer. Either a type parameter or a projection
1276 /// depending on one, with regions erased.
1281 impl<'a, 'tcx> SizeSkeleton<'tcx> {
1282 pub fn compute(ty: Ty<'tcx>,
1283 tcx: TyCtxt<'a, 'tcx, 'tcx>,
1284 param_env: ty::ParamEnv<'tcx>)
1285 -> Result<SizeSkeleton<'tcx>, LayoutError<'tcx>> {
1286 debug_assert!(!ty.has_infer_types());
1288 // First try computing a static layout.
1289 let err = match tcx.layout_of(param_env.and(ty)) {
1291 return Ok(SizeSkeleton::Known(layout.size));
1297 ty::TyRef(_, pointee, _) |
1298 ty::TyRawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
1299 let non_zero = !ty.is_unsafe_ptr();
1300 let tail = tcx.struct_tail(pointee);
1302 ty::TyParam(_) | ty::TyProjection(_) => {
1303 debug_assert!(tail.has_param_types() || tail.has_self_ty());
1304 Ok(SizeSkeleton::Pointer {
1306 tail: tcx.erase_regions(&tail)
1310 bug!("SizeSkeleton::compute({}): layout errored ({}), yet \
1311 tail `{}` is not a type parameter or a projection",
1317 ty::TyAdt(def, substs) => {
1318 // Only newtypes and enums w/ nullable pointer optimization.
1319 if def.is_union() || def.variants.is_empty() || def.variants.len() > 2 {
1323 // Get a zero-sized variant or a pointer newtype.
1324 let zero_or_ptr_variant = |i: usize| {
1325 let fields = def.variants[i].fields.iter().map(|field| {
1326 SizeSkeleton::compute(field.ty(tcx, substs), tcx, param_env)
1329 for field in fields {
1332 SizeSkeleton::Known(size) => {
1333 if size.bytes() > 0 {
1337 SizeSkeleton::Pointer {..} => {
1348 let v0 = zero_or_ptr_variant(0)?;
1350 if def.variants.len() == 1 {
1351 if let Some(SizeSkeleton::Pointer { non_zero, tail }) = v0 {
1352 return Ok(SizeSkeleton::Pointer {
1353 non_zero: non_zero ||
1354 Some(def.did) == tcx.lang_items().non_zero(),
1362 let v1 = zero_or_ptr_variant(1)?;
1363 // Nullable pointer enum optimization.
1365 (Some(SizeSkeleton::Pointer { non_zero: true, tail }), None) |
1366 (None, Some(SizeSkeleton::Pointer { non_zero: true, tail })) => {
1367 Ok(SizeSkeleton::Pointer {
1376 ty::TyProjection(_) | ty::TyAnon(..) => {
1377 let normalized = tcx.normalize_erasing_regions(param_env, ty);
1378 if ty == normalized {
1381 SizeSkeleton::compute(normalized, tcx, param_env)
1389 pub fn same_size(self, other: SizeSkeleton) -> bool {
1390 match (self, other) {
1391 (SizeSkeleton::Known(a), SizeSkeleton::Known(b)) => a == b,
1392 (SizeSkeleton::Pointer { tail: a, .. },
1393 SizeSkeleton::Pointer { tail: b, .. }) => a == b,
1399 pub trait HasTyCtxt<'tcx>: HasDataLayout {
1400 fn tcx<'a>(&'a self) -> TyCtxt<'a, 'tcx, 'tcx>;
1403 impl<'a, 'gcx, 'tcx> HasDataLayout for TyCtxt<'a, 'gcx, 'tcx> {
1404 fn data_layout(&self) -> &TargetDataLayout {
1409 impl<'a, 'gcx, 'tcx> HasTyCtxt<'gcx> for TyCtxt<'a, 'gcx, 'tcx> {
1410 fn tcx<'b>(&'b self) -> TyCtxt<'b, 'gcx, 'gcx> {
1415 impl<'tcx, T: HasDataLayout> HasDataLayout for LayoutCx<'tcx, T> {
1416 fn data_layout(&self) -> &TargetDataLayout {
1417 self.tcx.data_layout()
1421 impl<'gcx, 'tcx, T: HasTyCtxt<'gcx>> HasTyCtxt<'gcx> for LayoutCx<'tcx, T> {
1422 fn tcx<'b>(&'b self) -> TyCtxt<'b, 'gcx, 'gcx> {
1427 pub trait MaybeResult<T> {
1428 fn from_ok(x: T) -> Self;
1429 fn map_same<F: FnOnce(T) -> T>(self, f: F) -> Self;
1432 impl<T> MaybeResult<T> for T {
1433 fn from_ok(x: T) -> Self {
1436 fn map_same<F: FnOnce(T) -> T>(self, f: F) -> Self {
1441 impl<T, E> MaybeResult<T> for Result<T, E> {
1442 fn from_ok(x: T) -> Self {
1445 fn map_same<F: FnOnce(T) -> T>(self, f: F) -> Self {
1450 pub type TyLayout<'tcx> = ::rustc_target::abi::TyLayout<'tcx, Ty<'tcx>>;
1452 impl<'a, 'tcx> LayoutOf for LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> {
1454 type TyLayout = Result<TyLayout<'tcx>, LayoutError<'tcx>>;
1456 /// Computes the layout of a type. Note that this implicitly
1457 /// executes in "reveal all" mode.
1458 fn layout_of(self, ty: Ty<'tcx>) -> Self::TyLayout {
1459 let param_env = self.param_env.with_reveal_all();
1460 let ty = self.tcx.normalize_erasing_regions(param_env, ty);
1461 let details = self.tcx.layout_raw(param_env.and(ty))?;
1462 let layout = TyLayout {
1467 // NB: This recording is normally disabled; when enabled, it
1468 // can however trigger recursive invocations of `layout_of`.
1469 // Therefore, we execute it *after* the main query has
1470 // completed, to avoid problems around recursive structures
1471 // and the like. (Admittedly, I wasn't able to reproduce a problem
1472 // here, but it seems like the right thing to do. -nmatsakis)
1473 self.record_layout_for_printing(layout);
1479 impl<'a, 'tcx> LayoutOf for LayoutCx<'tcx, ty::query::TyCtxtAt<'a, 'tcx, 'tcx>> {
1481 type TyLayout = Result<TyLayout<'tcx>, LayoutError<'tcx>>;
1483 /// Computes the layout of a type. Note that this implicitly
1484 /// executes in "reveal all" mode.
1485 fn layout_of(self, ty: Ty<'tcx>) -> Self::TyLayout {
1486 let param_env = self.param_env.with_reveal_all();
1487 let ty = self.tcx.normalize_erasing_regions(param_env, ty);
1488 let details = self.tcx.layout_raw(param_env.and(ty))?;
1489 let layout = TyLayout {
1494 // NB: This recording is normally disabled; when enabled, it
1495 // can however trigger recursive invocations of `layout_of`.
1496 // Therefore, we execute it *after* the main query has
1497 // completed, to avoid problems around recursive structures
1498 // and the like. (Admittedly, I wasn't able to reproduce a problem
1499 // here, but it seems like the right thing to do. -nmatsakis)
1502 param_env: self.param_env
1504 cx.record_layout_for_printing(layout);
1510 // Helper (inherent) `layout_of` methods to avoid pushing `LayoutCx` to users.
1511 impl TyCtxt<'a, 'tcx, '_> {
1512 /// Computes the layout of a type. Note that this implicitly
1513 /// executes in "reveal all" mode.
1515 pub fn layout_of(self, param_env_and_ty: ty::ParamEnvAnd<'tcx, Ty<'tcx>>)
1516 -> Result<TyLayout<'tcx>, LayoutError<'tcx>> {
1518 tcx: self.global_tcx(),
1519 param_env: param_env_and_ty.param_env
1521 cx.layout_of(param_env_and_ty.value)
1525 impl ty::query::TyCtxtAt<'a, 'tcx, '_> {
1526 /// Computes the layout of a type. Note that this implicitly
1527 /// executes in "reveal all" mode.
1529 pub fn layout_of(self, param_env_and_ty: ty::ParamEnvAnd<'tcx, Ty<'tcx>>)
1530 -> Result<TyLayout<'tcx>, LayoutError<'tcx>> {
1532 tcx: self.global_tcx().at(self.span),
1533 param_env: param_env_and_ty.param_env
1535 cx.layout_of(param_env_and_ty.value)
1539 impl<'a, 'tcx, C> TyLayoutMethods<'tcx, C> for Ty<'tcx>
1540 where C: LayoutOf<Ty = Ty<'tcx>> + HasTyCtxt<'tcx>,
1541 C::TyLayout: MaybeResult<TyLayout<'tcx>>
1543 fn for_variant(this: TyLayout<'tcx>, cx: C, variant_index: usize) -> TyLayout<'tcx> {
1544 let details = match this.variants {
1545 Variants::Single { index } if index == variant_index => this.details,
1547 Variants::Single { index } => {
1548 // Deny calling for_variant more than once for non-Single enums.
1549 cx.layout_of(this.ty).map_same(|layout| {
1550 assert_eq!(layout.variants, Variants::Single { index });
1554 let fields = match this.ty.sty {
1555 ty::TyAdt(def, _) => def.variants[variant_index].fields.len(),
1559 tcx.intern_layout(LayoutDetails {
1560 variants: Variants::Single { index: variant_index },
1561 fields: FieldPlacement::Union(fields),
1562 abi: Abi::Uninhabited,
1563 align: tcx.data_layout.i8_align,
1568 Variants::NicheFilling { ref variants, .. } |
1569 Variants::Tagged { ref variants, .. } => {
1570 &variants[variant_index]
1574 assert_eq!(details.variants, Variants::Single { index: variant_index });
1582 fn field(this: TyLayout<'tcx>, cx: C, i: usize) -> C::TyLayout {
1584 cx.layout_of(match this.ty.sty {
1593 ty::TyGeneratorWitness(..) |
1595 ty::TyDynamic(..) => {
1596 bug!("TyLayout::field_type({:?}): not applicable", this)
1599 // Potentially-fat pointers.
1600 ty::TyRef(_, pointee, _) |
1601 ty::TyRawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
1602 assert!(i < this.fields.count());
1604 // Reuse the fat *T type as its own thin pointer data field.
1605 // This provides information about e.g. DST struct pointees
1606 // (which may have no non-DST form), and will work as long
1607 // as the `Abi` or `FieldPlacement` is checked by users.
1609 let nil = tcx.mk_nil();
1610 let ptr_ty = if this.ty.is_unsafe_ptr() {
1613 tcx.mk_mut_ref(tcx.types.re_static, nil)
1615 return cx.layout_of(ptr_ty).map_same(|mut ptr_layout| {
1616 ptr_layout.ty = this.ty;
1621 match tcx.struct_tail(pointee).sty {
1623 ty::TyStr => tcx.types.usize,
1624 ty::TyDynamic(data, _) => {
1625 let trait_def_id = data.principal().unwrap().def_id();
1626 let num_fns: u64 = crate::traits::supertrait_def_ids(tcx, trait_def_id)
1627 .map(|trait_def_id| {
1628 tcx.associated_items(trait_def_id)
1629 .filter(|item| item.kind == ty::AssociatedKind::Method)
1634 tcx.types.re_static,
1635 tcx.mk_array(tcx.types.usize, 3 + num_fns),
1637 /* FIXME use actual fn pointers
1639 tcx.mk_array(tcx.types.usize, 3),
1640 tcx.mk_array(Option<fn()>),
1644 _ => bug!("TyLayout::field_type({:?}): not applicable", this)
1648 // Arrays and slices.
1649 ty::TyArray(element, _) |
1650 ty::TySlice(element) => element,
1651 ty::TyStr => tcx.types.u8,
1653 // Tuples, generators and closures.
1654 ty::TyClosure(def_id, ref substs) => {
1655 substs.upvar_tys(def_id, tcx).nth(i).unwrap()
1658 ty::TyGenerator(def_id, ref substs, _) => {
1659 substs.field_tys(def_id, tcx).nth(i).unwrap()
1662 ty::TyTuple(tys) => tys[i],
1664 // SIMD vector types.
1665 ty::TyAdt(def, ..) if def.repr.simd() => {
1666 this.ty.simd_type(tcx)
1670 ty::TyAdt(def, substs) => {
1671 match this.variants {
1672 Variants::Single { index } => {
1673 def.variants[index].fields[i].ty(tcx, substs)
1676 // Discriminant field for enums (where applicable).
1677 Variants::Tagged { tag: ref discr, .. } |
1678 Variants::NicheFilling { niche: ref discr, .. } => {
1680 let layout = LayoutDetails::scalar(tcx, discr.clone());
1681 return MaybeResult::from_ok(TyLayout {
1682 details: tcx.intern_layout(layout),
1683 ty: discr.value.to_ty(tcx)
1689 ty::TyProjection(_) | ty::TyAnon(..) | ty::TyParam(_) |
1690 ty::TyInfer(_) | ty::TyError => {
1691 bug!("TyLayout::field_type: unexpected type `{}`", this.ty)
1704 fn reserve<'a, 'tcx>(
1706 cx: LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>>,
1708 ) -> Option<(u128, Scalar)> {
1709 if count > self.available {
1712 let Scalar { value, valid_range: ref v } = self.scalar;
1713 let bits = value.size(cx).bits();
1714 assert!(bits <= 128);
1715 let max_value = !0u128 >> (128 - bits);
1716 let start = v.end().wrapping_add(1) & max_value;
1717 let end = v.end().wrapping_add(count) & max_value;
1718 Some((start, Scalar { value, valid_range: *v.start()..=end }))
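// Worked example (illustrative): reserving 2 values in a bool-like niche
// (valid_range 0..=1, 8 bits wide) yields start == 2 and a widened
// valid_range of 0..=3; this is how e.g. `Option<Option<bool>>` fits in a
// single byte.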
1722 impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> {
1723 /// Find the offset of a niche leaf field, starting from
1724 /// the given type and recursing through aggregates.
1725 // FIXME(eddyb) traverse already optimized enums.
1726 fn find_niche(self, layout: TyLayout<'tcx>) -> Result<Option<Niche>, LayoutError<'tcx>> {
1727 let scalar_niche = |scalar: &Scalar, offset| {
1728 let Scalar { value, valid_range: ref v } = *scalar;
1730 let bits = value.size(self).bits();
1731 assert!(bits <= 128);
1732 let max_value = !0u128 >> (128 - bits);
1734 // Find out how many values are outside the valid range.
1735 let available = if v.start() <= v.end() {
1736 v.start() + (max_value - v.end())
1738 v.start() - v.end() - 1
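// E.g. (illustrative, bits == 8): a plain range 1..=255 leaves one value (0)
// available, while a wrapping range 250..=5 covers 12 valid values and
// leaves 250 - 5 - 1 == 244 available.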
1741 // Give up if there is no niche value available.
1746 Some(Niche { offset, scalar: scalar.clone(), available })
// Local variables which live across yields are stored
// in the generator type as fields. These may be uninitialized,
// so we don't look for niches there.
1752 if let ty::TyGenerator(..) = layout.ty.sty {
1757 Abi::Scalar(ref scalar) => {
1758 return Ok(scalar_niche(scalar, Size::ZERO));
1760 Abi::ScalarPair(ref a, ref b) => {
1761 // HACK(nox): We iter on `b` and then `a` because `max_by_key`
1762 // returns the last maximum.
1763 let niche = iter::once((b, a.value.size(self).abi_align(b.value.align(self))))
1764 .chain(iter::once((a, Size::ZERO)))
1765 .filter_map(|(scalar, offset)| scalar_niche(scalar, offset))
1766 .max_by_key(|niche| niche.available);
1769 Abi::Vector { ref element, .. } => {
1770 return Ok(scalar_niche(element, Size::ZERO));
// Perhaps one of the fields has a niche; let's recurse and find out.
1776 if let FieldPlacement::Union(_) = layout.fields {
// Only Rust enums have safe-to-inspect fields
// (a discriminant); other unions are unsafe.
1779 if let Variants::Single { .. } = layout.variants {
1783 if let FieldPlacement::Array { .. } = layout.fields {
1784 if layout.fields.count() > 0 {
1785 return self.find_niche(layout.field(self, 0)?);
1790 let mut niche = None;
1791 let mut available = 0;
1792 for i in 0..layout.fields.count() {
1793 if let Some(mut c) = self.find_niche(layout.field(self, i)?)? {
1794 if c.available > available {
1795 available = c.available;
1796 c.offset += layout.fields.offset(i);
1805 impl<'a> HashStable<StableHashingContext<'a>> for Variants {
1806 fn hash_stable<W: StableHasherResult>(&self,
1807 hcx: &mut StableHashingContext<'a>,
1808 hasher: &mut StableHasher<W>) {
1809 use ty::layout::Variants::*;
1810 mem::discriminant(self).hash_stable(hcx, hasher);
1813 Single { index } => {
1814 index.hash_stable(hcx, hasher);
1820 tag.hash_stable(hcx, hasher);
1821 variants.hash_stable(hcx, hasher);
1830 dataful_variant.hash_stable(hcx, hasher);
1831 niche_variants.start().hash_stable(hcx, hasher);
1832 niche_variants.end().hash_stable(hcx, hasher);
1833 niche.hash_stable(hcx, hasher);
1834 niche_start.hash_stable(hcx, hasher);
1835 variants.hash_stable(hcx, hasher);
1841 impl<'a> HashStable<StableHashingContext<'a>> for FieldPlacement {
1842 fn hash_stable<W: StableHasherResult>(&self,
1843 hcx: &mut StableHashingContext<'a>,
1844 hasher: &mut StableHasher<W>) {
1845 use ty::layout::FieldPlacement::*;
1846 mem::discriminant(self).hash_stable(hcx, hasher);
1850 count.hash_stable(hcx, hasher);
1852 Array { count, stride } => {
1853 count.hash_stable(hcx, hasher);
1854 stride.hash_stable(hcx, hasher);
1856 Arbitrary { ref offsets, ref memory_index } => {
1857 offsets.hash_stable(hcx, hasher);
1858 memory_index.hash_stable(hcx, hasher);
1864 impl<'a> HashStable<StableHashingContext<'a>> for Abi {
1865 fn hash_stable<W: StableHasherResult>(&self,
1866 hcx: &mut StableHashingContext<'a>,
1867 hasher: &mut StableHasher<W>) {
1868 use ty::layout::Abi::*;
1869 mem::discriminant(self).hash_stable(hcx, hasher);
1873 Scalar(ref value) => {
1874 value.hash_stable(hcx, hasher);
1876 ScalarPair(ref a, ref b) => {
1877 a.hash_stable(hcx, hasher);
1878 b.hash_stable(hcx, hasher);
1880 Vector { ref element, count } => {
1881 element.hash_stable(hcx, hasher);
1882 count.hash_stable(hcx, hasher);
1884 Aggregate { sized } => {
1885 sized.hash_stable(hcx, hasher);
1891 impl<'a> HashStable<StableHashingContext<'a>> for Scalar {
1892 fn hash_stable<W: StableHasherResult>(&self,
1893 hcx: &mut StableHashingContext<'a>,
1894 hasher: &mut StableHasher<W>) {
1895 let Scalar { value, ref valid_range } = *self;
1896 value.hash_stable(hcx, hasher);
1897 valid_range.start().hash_stable(hcx, hasher);
1898 valid_range.end().hash_stable(hcx, hasher);
1902 impl_stable_hash_for!(struct ::ty::layout::LayoutDetails {
1910 impl_stable_hash_for!(enum ::ty::layout::Integer {
1918 impl_stable_hash_for!(enum ::ty::layout::Primitive {
1919 Int(integer, signed),
1924 impl<'gcx> HashStable<StableHashingContext<'gcx>> for Align {
1925 fn hash_stable<W: StableHasherResult>(&self,
1926 hcx: &mut StableHashingContext<'gcx>,
1927 hasher: &mut StableHasher<W>) {
1928 self.abi().hash_stable(hcx, hasher);
1929 self.pref().hash_stable(hcx, hasher);
1933 impl<'gcx> HashStable<StableHashingContext<'gcx>> for Size {
1934 fn hash_stable<W: StableHasherResult>(&self,
1935 hcx: &mut StableHashingContext<'gcx>,
1936 hasher: &mut StableHasher<W>) {
1937 self.bytes().hash_stable(hcx, hasher);
1941 impl<'a, 'gcx> HashStable<StableHashingContext<'a>> for LayoutError<'gcx>
1943 fn hash_stable<W: StableHasherResult>(&self,
1944 hcx: &mut StableHashingContext<'a>,
1945 hasher: &mut StableHasher<W>) {
1946 use ty::layout::LayoutError::*;
1947 mem::discriminant(self).hash_stable(hcx, hasher);
1951 SizeOverflow(t) => t.hash_stable(hcx, hasher)