1 // Copyright 2016 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
11 use session::{self, DataTypeKind};
12 use ty::{self, Ty, TyCtxt, TypeFoldable, ReprOptions};
14 use syntax::ast::{self, FloatTy, IntTy, UintTy};
16 use syntax_pos::DUMMY_SP;
23 use ich::StableHashingContext;
24 use rustc_data_structures::stable_hasher::{HashStable, StableHasher,
27 pub use rustc_target::abi::*;
29 pub trait IntegerExt {
30 fn to_ty<'a, 'tcx>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, signed: bool) -> Ty<'tcx>;
31 fn from_attr<C: HasDataLayout>(cx: C, ity: attr::IntType) -> Integer;
32 fn repr_discr<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
40 impl IntegerExt for Integer {
41 fn to_ty<'a, 'tcx>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, signed: bool) -> Ty<'tcx> {
42 match (*self, signed) {
43 (I8, false) => tcx.types.u8,
44 (I16, false) => tcx.types.u16,
45 (I32, false) => tcx.types.u32,
46 (I64, false) => tcx.types.u64,
47 (I128, false) => tcx.types.u128,
48 (I8, true) => tcx.types.i8,
49 (I16, true) => tcx.types.i16,
50 (I32, true) => tcx.types.i32,
51 (I64, true) => tcx.types.i64,
52 (I128, true) => tcx.types.i128,
56 /// Get the Integer type from an attr::IntType.
57 fn from_attr<C: HasDataLayout>(cx: C, ity: attr::IntType) -> Integer {
58 let dl = cx.data_layout();
61 attr::SignedInt(IntTy::I8) | attr::UnsignedInt(UintTy::U8) => I8,
62 attr::SignedInt(IntTy::I16) | attr::UnsignedInt(UintTy::U16) => I16,
63 attr::SignedInt(IntTy::I32) | attr::UnsignedInt(UintTy::U32) => I32,
64 attr::SignedInt(IntTy::I64) | attr::UnsignedInt(UintTy::U64) => I64,
65 attr::SignedInt(IntTy::I128) | attr::UnsignedInt(UintTy::U128) => I128,
66 attr::SignedInt(IntTy::Isize) | attr::UnsignedInt(UintTy::Usize) => {
67 dl.ptr_sized_integer()
72 /// Find the appropriate Integer type and signedness for the given
73 /// signed discriminant range and #[repr] attribute.
74 /// N.B.: u128 values above i128::MAX will be treated as signed, but
75 /// that shouldn't affect anything, other than maybe debuginfo.
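/// For example (assuming the default `I8` lower bound and no `#[repr]` hint),
/// a discriminant range of 0..=200 fits in an unsigned `I8`, while -1..=200
/// needs a signed `I16`.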
76 fn repr_discr<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
82 // Theoretically, negative values could be larger in unsigned representation
83 // than the unsigned representation of the signed minimum. However, if there
84 // are any negative values, the only valid unsigned representation is u128
85 // which can fit all i128 values, so the result remains unaffected.
86 let unsigned_fit = Integer::fit_unsigned(cmp::max(min as u128, max as u128));
87 let signed_fit = cmp::max(Integer::fit_signed(min), Integer::fit_signed(max));
89 let mut min_from_extern = None;
92 if let Some(ity) = repr.int {
93 let discr = Integer::from_attr(tcx, ity);
94 let fit = if ity.is_signed() { signed_fit } else { unsigned_fit };
96 bug!("Integer::repr_discr: `#[repr]` hint too small for \
97 discriminant range of enum `{}`", ty)
99 return (discr, ity.is_signed());
103 match &tcx.sess.target.target.arch[..] {
104 // WARNING: the ARM EABI has two variants; the one corresponding
105 // to `at_least == I32` appears to be used on Linux and NetBSD,
106 // but some systems may use the variant corresponding to no
107 // lower bound. However, we don't run on those yet...?
108 "arm" => min_from_extern = Some(I32),
109 _ => min_from_extern = Some(I32),
113 let at_least = min_from_extern.unwrap_or(min_default);
115 // If there are no negative values, we can use the unsigned fit.
117 (cmp::max(unsigned_fit, at_least), false)
119 (cmp::max(signed_fit, at_least), true)
124 pub trait PrimitiveExt {
125 fn to_ty<'a, 'tcx>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>) -> Ty<'tcx>;
128 impl PrimitiveExt for Primitive {
129 fn to_ty<'a, 'tcx>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>) -> Ty<'tcx> {
131 Int(i, signed) => i.to_ty(tcx, signed),
132 F32 => tcx.types.f32,
133 F64 => tcx.types.f64,
134 Pointer => tcx.mk_mut_ptr(tcx.mk_nil()),
139 /// The first half of a fat pointer.
141 /// - For a trait object, this is the address of the box.
142 /// - For a slice, this is the base address.
143 pub const FAT_PTR_ADDR: usize = 0;
145 /// The second half of a fat pointer.
147 /// - For a trait object, this is the address of the vtable.
148 /// - For a slice, this is the length.
149 pub const FAT_PTR_EXTRA: usize = 1;
151 #[derive(Copy, Clone, Debug)]
152 pub enum LayoutError<'tcx> {
154 SizeOverflow(Ty<'tcx>)
157 impl<'tcx> fmt::Display for LayoutError<'tcx> {
158 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
160 LayoutError::Unknown(ty) => {
161 write!(f, "the type `{:?}` has an unknown layout", ty)
163 LayoutError::SizeOverflow(ty) => {
164 write!(f, "the type `{:?}` is too big for the current architecture", ty)
170 fn layout_raw<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
171 query: ty::ParamEnvAnd<'tcx, Ty<'tcx>>)
172 -> Result<&'tcx LayoutDetails, LayoutError<'tcx>>
174 ty::tls::with_related_context(tcx, move |icx| {
175 let rec_limit = *tcx.sess.recursion_limit.get();
176 let (param_env, ty) = query.into_parts();
178 if icx.layout_depth > rec_limit {
180 &format!("overflow representing the type `{}`", ty));
183 // Update the ImplicitCtxt to increase the layout_depth
184 let icx = ty::tls::ImplicitCtxt {
185 layout_depth: icx.layout_depth + 1,
189 ty::tls::enter_context(&icx, |_| {
190 let cx = LayoutCx { tcx, param_env };
191 cx.layout_raw_uncached(ty)
196 pub fn provide(providers: &mut ty::maps::Providers) {
197 *providers = ty::maps::Providers {
203 #[derive(Copy, Clone)]
204 pub struct LayoutCx<'tcx, C> {
206 pub param_env: ty::ParamEnv<'tcx>
209 impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> {
210 fn layout_raw_uncached(self, ty: Ty<'tcx>)
211 -> Result<&'tcx LayoutDetails, LayoutError<'tcx>> {
213 let param_env = self.param_env;
214 let dl = self.data_layout();
215 let scalar_unit = |value: Primitive| {
216 let bits = value.size(dl).bits();
217 assert!(bits <= 128);
220 valid_range: 0..=(!0 >> (128 - bits))
223 let scalar = |value: Primitive| {
224 tcx.intern_layout(LayoutDetails::scalar(self, scalar_unit(value)))
226 let scalar_pair = |a: Scalar, b: Scalar| {
227 let align = a.value.align(dl).max(b.value.align(dl)).max(dl.aggregate_align);
228 let b_offset = a.value.size(dl).abi_align(b.value.align(dl));
229 let size = (b_offset + b.value.size(dl)).abi_align(align);
231 variants: Variants::Single { index: 0 },
232 fields: FieldPlacement::Arbitrary {
233 offsets: vec![Size::from_bytes(0), b_offset],
234 memory_index: vec![0, 1]
236 abi: Abi::ScalarPair(a, b),
242 #[derive(Copy, Clone, Debug)]
244 /// A tuple, closure, or univariant which cannot be coerced to unsized.
246 /// A univariant, the last field of which may be coerced to unsized.
248 /// A univariant, but with a prefix of an arbitrary size & alignment (e.g. enum tag).
249 Prefixed(Size, Align),
251 let univariant_uninterned = |fields: &[TyLayout], repr: &ReprOptions, kind| {
252 let packed = repr.packed();
253 if packed && repr.align > 0 {
254 bug!("struct cannot be packed and aligned");
258 let pack = repr.pack as u64;
259 Align::from_bytes(pack, pack).unwrap()
262 let mut align = if packed {
268 let mut sized = true;
269 let mut offsets = vec![Size::from_bytes(0); fields.len()];
270 let mut inverse_memory_index: Vec<u32> = (0..fields.len() as u32).collect();
272 let mut optimize = !repr.inhibit_struct_field_reordering_opt();
273 if let StructKind::Prefixed(_, align) = kind {
274 optimize &= align.abi() == 1;
278 let end = if let StructKind::MaybeUnsized = kind {
283 let optimizing = &mut inverse_memory_index[..end];
284 let field_align = |f: &TyLayout| {
285 if packed { f.align.min(pack).abi() } else { f.align.abi() }
288 StructKind::AlwaysSized |
289 StructKind::MaybeUnsized => {
290 optimizing.sort_by_key(|&x| {
291 // Place ZSTs first to avoid "interesting offsets",
292 // especially with only one or two non-ZST fields.
293 let f = &fields[x as usize];
294 (!f.is_zst(), cmp::Reverse(field_align(f)))
297 StructKind::Prefixed(..) => {
298 optimizing.sort_by_key(|&x| field_align(&fields[x as usize]));
303 // inverse_memory_index holds field indices by increasing memory offset.
304 // That is, if field 5 has offset 0, the first element of inverse_memory_index is 5.
305 // We now write field offsets to the corresponding offset slot;
306 // field 5 with offset 0 puts 0 in offsets[5].
307 // At the bottom of this function, we use inverse_memory_index to produce memory_index.
309 let mut offset = Size::from_bytes(0);
311 if let StructKind::Prefixed(prefix_size, prefix_align) = kind {
313 let prefix_align = prefix_align.min(pack);
314 align = align.max(prefix_align);
316 align = align.max(prefix_align);
318 offset = prefix_size.abi_align(prefix_align);
321 for &i in &inverse_memory_index {
322 let field = fields[i as usize];
324 bug!("univariant: field #{} of `{}` comes after unsized field",
328 if field.abi == Abi::Uninhabited {
329 return Ok(LayoutDetails::uninhabited(fields.len()));
332 if field.is_unsized() {
336 // Invariant: offset < dl.obj_size_bound() <= 1<<61
338 let field_pack = field.align.min(pack);
339 offset = offset.abi_align(field_pack);
340 align = align.max(field_pack);
343 offset = offset.abi_align(field.align);
344 align = align.max(field.align);
347 debug!("univariant offset: {:?} field: {:#?}", offset, field);
348 offsets[i as usize] = offset;
350 offset = offset.checked_add(field.size, dl)
351 .ok_or(LayoutError::SizeOverflow(ty))?;
355 let repr_align = repr.align as u64;
356 align = align.max(Align::from_bytes(repr_align, repr_align).unwrap());
357 debug!("univariant repr_align: {:?}", repr_align);
360 debug!("univariant min_size: {:?}", offset);
361 let min_size = offset;
363 // As stated above, inverse_memory_index holds field indices by increasing offset.
364 // This makes it an already-sorted view of the offsets vec.
365 // To invert it, consider:
366 // If field 5 has offset 0, inverse_memory_index[0] is 5, and memory_index[5] should be 0.
367 // Field 5 is first in memory, so memory_index[5] = 0; in general memory_index[inverse_memory_index[i]] = i.
368 // Note: if we didn't optimize, it's already right.
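// E.g. if inverse_memory_index is [1, 2, 0] (field 1 is first in memory),
// the inverted memory_index comes out as [2, 0, 1].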
370 let mut memory_index;
372 memory_index = vec![0; inverse_memory_index.len()];
374 for i in 0..inverse_memory_index.len() {
375 memory_index[inverse_memory_index[i] as usize] = i as u32;
378 memory_index = inverse_memory_index;
381 let size = min_size.abi_align(align);
382 let mut abi = Abi::Aggregate { sized };
384 // Unpack newtype ABIs and find scalar pairs.
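// E.g. a non-repr(C) newtype like `struct Wrapper(u64);` inherits the Scalar ABI
// of its field, and a pair of scalars such as `(u32, bool)` becomes a ScalarPair.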
385 if sized && size.bytes() > 0 {
386 // All other fields must be ZSTs, and we need them to all start at 0.
387 let mut zst_offsets =
388 offsets.iter().enumerate().filter(|&(i, _)| fields[i].is_zst());
389 if zst_offsets.all(|(_, o)| o.bytes() == 0) {
390 let mut non_zst_fields =
391 fields.iter().enumerate().filter(|&(_, f)| !f.is_zst());
393 match (non_zst_fields.next(), non_zst_fields.next(), non_zst_fields.next()) {
394 // We have exactly one non-ZST field.
395 (Some((i, field)), None, None) => {
396 // Field fills the struct and it has a scalar or scalar pair ABI.
397 if offsets[i].bytes() == 0 &&
398 align.abi() == field.align.abi() &&
401 // For plain scalars, or vectors of them, we can't unpack
402 // newtypes for `#[repr(C)]`, as that affects C ABIs.
403 Abi::Scalar(_) | Abi::Vector { .. } if optimize => {
404 abi = field.abi.clone();
406 // But scalar pairs are Rust-specific and get
407 // treated as aggregates by C ABIs anyway.
408 Abi::ScalarPair(..) => {
409 abi = field.abi.clone();
416 // Two non-ZST fields, and they're both scalars.
417 (Some((i, &TyLayout {
418 details: &LayoutDetails { abi: Abi::Scalar(ref a), .. }, ..
419 })), Some((j, &TyLayout {
420 details: &LayoutDetails { abi: Abi::Scalar(ref b), .. }, ..
422 // Order by the memory placement, not source order.
423 let ((i, a), (j, b)) = if offsets[i] < offsets[j] {
428 let pair = scalar_pair(a.clone(), b.clone());
429 let pair_offsets = match pair.fields {
430 FieldPlacement::Arbitrary {
434 assert_eq!(memory_index, &[0, 1]);
439 if offsets[i] == pair_offsets[0] &&
440 offsets[j] == pair_offsets[1] &&
441 align == pair.align &&
443 // We can use `ScalarPair` only when it matches our
444 // already computed layout (including `#[repr(C)]`).
455 variants: Variants::Single { index: 0 },
456 fields: FieldPlacement::Arbitrary {
465 let univariant = |fields: &[TyLayout], repr: &ReprOptions, kind| {
466 Ok(tcx.intern_layout(univariant_uninterned(fields, repr, kind)?))
468 assert!(!ty.has_infer_types());
473 tcx.intern_layout(LayoutDetails::scalar(self, Scalar {
474 value: Int(I8, false),
479 tcx.intern_layout(LayoutDetails::scalar(self, Scalar {
480 value: Int(I32, false),
481 valid_range: 0..=0x10FFFF
485 scalar(Int(Integer::from_attr(dl, attr::SignedInt(ity)), true))
488 scalar(Int(Integer::from_attr(dl, attr::UnsignedInt(ity)), false))
490 ty::TyFloat(FloatTy::F32) => scalar(F32),
491 ty::TyFloat(FloatTy::F64) => scalar(F64),
493 let mut ptr = scalar_unit(Pointer);
494 ptr.valid_range = 1..=*ptr.valid_range.end();
495 tcx.intern_layout(LayoutDetails::scalar(self, ptr))
500 tcx.intern_layout(LayoutDetails::uninhabited(0))
503 // Potentially-fat pointers.
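// References get a non-null valid range below (raw pointers do not), which is
// what lets e.g. `Option<&T>` use the null value as its niche.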
504 ty::TyRef(_, ty::TypeAndMut { ty: pointee, .. }) |
505 ty::TyRawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
506 let mut data_ptr = scalar_unit(Pointer);
507 if !ty.is_unsafe_ptr() {
508 data_ptr.valid_range = 1..=*data_ptr.valid_range.end();
511 let pointee = tcx.normalize_erasing_regions(param_env, pointee);
512 if pointee.is_sized(tcx.at(DUMMY_SP), param_env) {
513 return Ok(tcx.intern_layout(LayoutDetails::scalar(self, data_ptr)));
516 let unsized_part = tcx.struct_tail(pointee);
517 let metadata = match unsized_part.sty {
518 ty::TyForeign(..) => {
519 return Ok(tcx.intern_layout(LayoutDetails::scalar(self, data_ptr)));
521 ty::TySlice(_) | ty::TyStr => {
522 scalar_unit(Int(dl.ptr_sized_integer(), false))
524 ty::TyDynamic(..) => {
525 let mut vtable = scalar_unit(Pointer);
526 vtable.valid_range = 1..=*vtable.valid_range.end();
529 _ => return Err(LayoutError::Unknown(unsized_part))
532 // Effectively a (ptr, meta) tuple.
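// E.g. `&[u8]` is (non-null data pointer, usize length) and `&dyn Trait` is
// (non-null data pointer, non-null vtable pointer).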
533 tcx.intern_layout(scalar_pair(data_ptr, metadata))
536 // Arrays and slices.
537 ty::TyArray(element, mut count) => {
538 if count.has_projections() {
539 count = tcx.normalize_erasing_regions(param_env, count);
540 if count.has_projections() {
541 return Err(LayoutError::Unknown(ty));
545 let element = self.layout_of(element)?;
546 let count = count.val.unwrap_u64();
547 let size = element.size.checked_mul(count, dl)
548 .ok_or(LayoutError::SizeOverflow(ty))?;
550 tcx.intern_layout(LayoutDetails {
551 variants: Variants::Single { index: 0 },
552 fields: FieldPlacement::Array {
553 stride: element.size,
556 abi: Abi::Aggregate { sized: true },
557 align: element.align,
561 ty::TySlice(element) => {
562 let element = self.layout_of(element)?;
563 tcx.intern_layout(LayoutDetails {
564 variants: Variants::Single { index: 0 },
565 fields: FieldPlacement::Array {
566 stride: element.size,
569 abi: Abi::Aggregate { sized: false },
570 align: element.align,
571 size: Size::from_bytes(0)
575 tcx.intern_layout(LayoutDetails {
576 variants: Variants::Single { index: 0 },
577 fields: FieldPlacement::Array {
578 stride: Size::from_bytes(1),
581 abi: Abi::Aggregate { sized: false },
583 size: Size::from_bytes(0)
589 univariant(&[], &ReprOptions::default(), StructKind::AlwaysSized)?
591 ty::TyDynamic(..) | ty::TyForeign(..) => {
592 let mut unit = univariant_uninterned(&[], &ReprOptions::default(),
593 StructKind::AlwaysSized)?;
595 Abi::Aggregate { ref mut sized } => *sized = false,
598 tcx.intern_layout(unit)
601 // Tuples, generators and closures.
602 ty::TyGenerator(def_id, ref substs, _) => {
603 let tys = substs.field_tys(def_id, tcx);
604 univariant(&tys.map(|ty| self.layout_of(ty)).collect::<Result<Vec<_>, _>>()?,
605 &ReprOptions::default(),
606 StructKind::AlwaysSized)?
609 ty::TyClosure(def_id, ref substs) => {
610 let tys = substs.upvar_tys(def_id, tcx);
611 univariant(&tys.map(|ty| self.layout_of(ty)).collect::<Result<Vec<_>, _>>()?,
612 &ReprOptions::default(),
613 StructKind::AlwaysSized)?
616 ty::TyTuple(tys) => {
617 let kind = if tys.len() == 0 {
618 StructKind::AlwaysSized
620 StructKind::MaybeUnsized
623 univariant(&tys.iter().map(|ty| self.layout_of(ty)).collect::<Result<Vec<_>, _>>()?,
624 &ReprOptions::default(), kind)?
627 // SIMD vector types.
628 ty::TyAdt(def, ..) if def.repr.simd() => {
629 let element = self.layout_of(ty.simd_type(tcx))?;
630 let count = ty.simd_size(tcx) as u64;
632 let scalar = match element.abi {
633 Abi::Scalar(ref scalar) => scalar.clone(),
635 tcx.sess.fatal(&format!("monomorphising SIMD type `{}` with \
636 a non-machine element type `{}`",
640 let size = element.size.checked_mul(count, dl)
641 .ok_or(LayoutError::SizeOverflow(ty))?;
642 let align = dl.vector_align(size);
643 let size = size.abi_align(align);
645 tcx.intern_layout(LayoutDetails {
646 variants: Variants::Single { index: 0 },
647 fields: FieldPlacement::Array {
648 stride: element.size,
661 ty::TyAdt(def, substs) => {
662 // Cache the field layouts.
663 let variants = def.variants.iter().map(|v| {
664 v.fields.iter().map(|field| {
665 self.layout_of(field.ty(tcx, substs))
666 }).collect::<Result<Vec<_>, _>>()
667 }).collect::<Result<Vec<_>, _>>()?;
670 let packed = def.repr.packed();
671 if packed && def.repr.align > 0 {
672 bug!("Union cannot be packed and aligned");
676 let pack = def.repr.pack as u64;
677 Align::from_bytes(pack, pack).unwrap()
680 let mut align = if packed {
686 if def.repr.align > 0 {
687 let repr_align = def.repr.align as u64;
689 Align::from_bytes(repr_align, repr_align).unwrap());
692 let mut size = Size::from_bytes(0);
693 for field in &variants[0] {
694 assert!(!field.is_unsized());
697 let field_pack = field.align.min(pack);
698 align = align.max(field_pack);
700 align = align.max(field.align);
702 size = cmp::max(size, field.size);
705 return Ok(tcx.intern_layout(LayoutDetails {
706 variants: Variants::Single { index: 0 },
707 fields: FieldPlacement::Union(variants[0].len()),
708 abi: Abi::Aggregate { sized: true },
710 size: size.abi_align(align)
714 let (inh_first, inh_second) = {
715 let mut inh_variants = (0..variants.len()).filter(|&v| {
716 variants[v].iter().all(|f| f.abi != Abi::Uninhabited)
718 (inh_variants.next(), inh_variants.next())
720 if inh_first.is_none() {
721 // Uninhabited because it has no variants, or only uninhabited ones.
722 return Ok(tcx.intern_layout(LayoutDetails::uninhabited(0)));
725 let is_struct = !def.is_enum() ||
726 // Only one variant is inhabited.
727 (inh_second.is_none() &&
728 // Representation optimizations are allowed.
729 !def.repr.inhibit_enum_layout_opt());
731 // Struct, or univariant enum equivalent to a struct.
732 // (Typechecking will reject discriminant-sizing attrs.)
734 let v = inh_first.unwrap();
735 let kind = if def.is_enum() || variants[v].len() == 0 {
736 StructKind::AlwaysSized
738 let param_env = tcx.param_env(def.did);
739 let last_field = def.variants[v].fields.last().unwrap();
740 let always_sized = tcx.type_of(last_field.did)
741 .is_sized(tcx.at(DUMMY_SP), param_env);
742 if !always_sized { StructKind::MaybeUnsized }
743 else { StructKind::AlwaysSized }
746 let mut st = univariant_uninterned(&variants[v], &def.repr, kind)?;
747 st.variants = Variants::Single { index: v };
748 // Exclude 0 from the range of a newtype ABI NonZero<T>.
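// E.g. `NonZero<u32>` then has valid_range 1..=u32::MAX, so `Option<NonZero<u32>>`
// can reuse 0 as its `None` niche.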
749 if Some(def.did) == self.tcx.lang_items().non_zero() {
751 Abi::Scalar(ref mut scalar) |
752 Abi::ScalarPair(ref mut scalar, _) => {
753 if *scalar.valid_range.start() == 0 {
754 scalar.valid_range = 1..=*scalar.valid_range.end();
760 return Ok(tcx.intern_layout(st));
763 // The current code for niche-filling relies on variant indices
764 // instead of actual discriminants, so dataful enums with
765 // explicit discriminants (RFC #2363) would misbehave.
766 let no_explicit_discriminants = def.variants.iter().enumerate()
767 .all(|(i, v)| v.discr == ty::VariantDiscr::Relative(i));
769 // Niche-filling enum optimization.
770 if !def.repr.inhibit_enum_layout_opt() && no_explicit_discriminants {
771 let mut dataful_variant = None;
772 let mut niche_variants = usize::max_value()..=0;
774 // Find one non-ZST variant.
775 'variants: for (v, fields) in variants.iter().enumerate() {
776 if fields.iter().any(|f| f.abi == Abi::Uninhabited) {
781 if dataful_variant.is_none() {
782 dataful_variant = Some(v);
785 dataful_variant = None;
790 niche_variants = *niche_variants.start().min(&v)..=v;
793 if niche_variants.start() > niche_variants.end() {
794 dataful_variant = None;
797 if let Some(i) = dataful_variant {
798 let count = (niche_variants.end() - niche_variants.start() + 1) as u128;
799 for (field_index, &field) in variants[i].iter().enumerate() {
800 let (offset, niche, niche_start) =
801 match self.find_niche(field, count)? {
802 Some(niche) => niche,
805 let mut align = dl.aggregate_align;
806 let st = variants.iter().enumerate().map(|(j, v)| {
807 let mut st = univariant_uninterned(v,
808 &def.repr, StructKind::AlwaysSized)?;
809 st.variants = Variants::Single { index: j };
811 align = align.max(st.align);
814 }).collect::<Result<Vec<_>, _>>()?;
816 let offset = st[i].fields.offset(field_index) + offset;
817 let size = st[i].size;
819 let abi = match st[i].abi {
820 Abi::Scalar(_) => Abi::Scalar(niche.clone()),
821 Abi::ScalarPair(ref first, ref second) => {
822 // We need to use scalar_unit to reset the
823 // valid range to the maximal one for that
824 // primitive, because only the niche is
825 // guaranteed to be initialised, not the
827 if offset.bytes() == 0 {
828 Abi::ScalarPair(niche.clone(), scalar_unit(second.value))
830 Abi::ScalarPair(scalar_unit(first.value), niche.clone())
833 _ => Abi::Aggregate { sized: true },
836 return Ok(tcx.intern_layout(LayoutDetails {
837 variants: Variants::NicheFilling {
844 fields: FieldPlacement::Arbitrary {
845 offsets: vec![offset],
846 memory_index: vec![0]
856 let (mut min, mut max) = (i128::max_value(), i128::min_value());
857 let discr_type = def.repr.discr_type();
858 let bits = Integer::from_attr(tcx, discr_type).size().bits();
859 for (i, discr) in def.discriminants(tcx).enumerate() {
860 if variants[i].iter().any(|f| f.abi == Abi::Uninhabited) {
863 let mut x = discr.val as i128;
864 if discr_type.is_signed() {
865 // sign extend the raw representation to be an i128
866 x = (x << (128 - bits)) >> (128 - bits);
868 if x < min { min = x; }
869 if x > max { max = x; }
871 assert!(min <= max, "discriminant range is {}...{}", min, max);
872 let (min_ity, signed) = Integer::repr_discr(tcx, ty, &def.repr, min, max);
874 let mut align = dl.aggregate_align;
875 let mut size = Size::from_bytes(0);
877 // We're interested in the smallest alignment, so start large.
878 let mut start_align = Align::from_bytes(256, 256).unwrap();
879 assert_eq!(Integer::for_abi_align(dl, start_align), None);
881 // repr(C) on an enum tells us to make a (tag, union) layout,
882 // so we need to grow the prefix alignment to be at least
883 // the alignment of the union. (This value is used both for
884 // determining the alignment of the overall enum, and for
885 // determining the alignment of the payload after the tag.)
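// E.g. for `#[repr(C)] enum E { A(u8), B(u64) }` the payload union is 8-aligned,
// so the tag prefix is padded out and each variant's fields start at offset 8.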
886 let mut prefix_align = min_ity.align(dl);
888 for fields in &variants {
889 for field in fields {
890 prefix_align = prefix_align.max(field.align);
895 // Create the set of structs that represent each variant.
896 let mut layout_variants = variants.iter().enumerate().map(|(i, field_layouts)| {
897 let mut st = univariant_uninterned(&field_layouts,
898 &def.repr, StructKind::Prefixed(min_ity.size(), prefix_align))?;
899 st.variants = Variants::Single { index: i };
900 // Find the first field we can't move later
901 // to make room for a larger discriminant.
902 for field in st.fields.index_by_increasing_offset().map(|j| field_layouts[j]) {
903 if !field.is_zst() || field.align.abi() != 1 {
904 start_align = start_align.min(field.align);
908 size = cmp::max(size, st.size);
909 align = align.max(st.align);
911 }).collect::<Result<Vec<_>, _>>()?;
913 // Align the maximum variant size to the largest alignment.
914 size = size.abi_align(align);
916 if size.bytes() >= dl.obj_size_bound() {
917 return Err(LayoutError::SizeOverflow(ty));
920 let typeck_ity = Integer::from_attr(dl, def.repr.discr_type());
921 if typeck_ity < min_ity {
922 // It is a bug if Layout decided on a greater discriminant size than typeck for
923 // some reason at this point (based on the values the discriminant can take on). Mostly
924 // because this discriminant will be loaded, and then stored into a variable of
925 // the type calculated by typeck. Consider such a case (a bug): typeck decided on a
926 // byte-sized discriminant, but layout thinks we need 16 bits to store all the
927 // discriminant values. That would be a bug, because then, in trans, in order
928 // to store this 16-bit discriminant into an 8-bit sized temporary, some of the
929 // space necessary to represent it would have to be discarded (or layout is wrong
930 // in thinking it needs 16 bits).
931 bug!("layout decided on a larger discriminant type ({:?}) than typeck ({:?})",
932 min_ity, typeck_ity);
933 // However, it is fine to make the discriminant type larger than needed (as an optimisation)
934 // after this point – we’ll just truncate the value we load in trans.
937 // Check to see if we should use a different type for the
938 // discriminant. We can safely use a type with the same size
939 // as the alignment of the first field of each variant.
940 // We increase the size of the discriminant to avoid LLVM copying
941 // padding when it doesn't need to; such copies normally cause unaligned
942 // load/stores and excessive memcpy/memset operations. By using a
943 // bigger integer size, LLVM can be sure about its contents and
944 // won't be so conservative.
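// E.g. an enum whose variants all start with a `u32` field can have its 1-byte
// tag widened to 4 bytes, covering what would otherwise be padding.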
946 // Use the initial field alignment
947 let mut ity = Integer::for_abi_align(dl, start_align).unwrap_or(min_ity);
949 // If the alignment is not larger than the chosen discriminant size,
950 // don't use the alignment as the final size.
954 // Patch up the variants' first few fields.
955 let old_ity_size = min_ity.size();
956 let new_ity_size = ity.size();
957 for variant in &mut layout_variants {
958 if variant.abi == Abi::Uninhabited {
961 match variant.fields {
962 FieldPlacement::Arbitrary { ref mut offsets, .. } => {
964 if *i <= old_ity_size {
965 assert_eq!(*i, old_ity_size);
969 // We might be making the struct larger.
970 if variant.size <= old_ity_size {
971 variant.size = new_ity_size;
979 let tag_mask = !0u128 >> (128 - ity.size().bits());
981 value: Int(ity, signed),
982 valid_range: (min as u128 & tag_mask)..=(max as u128 & tag_mask),
984 let mut abi = Abi::Aggregate { sized: true };
985 if tag.value.size(dl) == size {
986 abi = Abi::Scalar(tag.clone());
987 } else if !tag.is_bool() {
988 // HACK(nox): Blindly using ScalarPair for all tagged enums
989 // where applicable leads to Option<u8> being handled as {i1, i8},
990 // which later confuses SROA and some loop optimisations,
991 // ultimately leading to the repeat-trusted-len test
992 // failing. We make the trade-off of using ScalarPair only
993 // for types where the tag isn't a boolean.
994 let mut common_prim = None;
995 for (field_layouts, layout_variant) in variants.iter().zip(&layout_variants) {
996 let offsets = match layout_variant.fields {
997 FieldPlacement::Arbitrary { ref offsets, .. } => offsets,
1000 let mut fields = field_layouts
1003 .filter(|p| !p.0.is_zst());
1004 let (field, offset) = match (fields.next(), fields.next()) {
1005 (None, None) => continue,
1006 (Some(pair), None) => pair,
1012 let prim = match field.details.abi {
1013 Abi::Scalar(ref scalar) => scalar.value,
1019 if let Some(pair) = common_prim {
1020 // This is pretty conservative. We could go fancier
1021 // by conflating things like i32 and u32, or even
1022 // realising that (u8, u8) could just cohabit with
1024 if pair != (prim, offset) {
1029 common_prim = Some((prim, offset));
1032 if let Some((prim, offset)) = common_prim {
1033 let pair = scalar_pair(tag.clone(), scalar_unit(prim));
1034 let pair_offsets = match pair.fields {
1035 FieldPlacement::Arbitrary {
1039 assert_eq!(memory_index, &[0, 1]);
1044 if pair_offsets[0] == Size::from_bytes(0) &&
1045 pair_offsets[1] == *offset &&
1046 align == pair.align &&
1048 // We can use `ScalarPair` only when it matches our
1049 // already computed layout (including `#[repr(C)]`).
1054 tcx.intern_layout(LayoutDetails {
1055 variants: Variants::Tagged {
1057 variants: layout_variants,
1059 fields: FieldPlacement::Arbitrary {
1060 offsets: vec![Size::from_bytes(0)],
1061 memory_index: vec![0]
1069 // Types with no meaningful known layout.
1070 ty::TyProjection(_) | ty::TyAnon(..) => {
1071 let normalized = tcx.normalize_erasing_regions(param_env, ty);
1072 if ty == normalized {
1073 return Err(LayoutError::Unknown(ty));
1075 tcx.layout_raw(param_env.and(normalized))?
1078 return Err(LayoutError::Unknown(ty));
1080 ty::TyGeneratorWitness(..) | ty::TyInfer(_) | ty::TyError => {
1081 bug!("LayoutDetails::compute: unexpected type `{}`", ty)
1086 /// This is invoked by the `layout_raw` query to record the final
1087 /// layout of each type.
1089 fn record_layout_for_printing(self, layout: TyLayout<'tcx>) {
1090 // If we are running with `-Zprint-type-sizes`, record layouts for
1091 // dumping later. Ignore layouts that are done with non-empty
1092 // environments or non-monomorphic layouts, as the user only wants
1093 // to see the stuff resulting from the final trans session.
1095 !self.tcx.sess.opts.debugging_opts.print_type_sizes ||
1096 layout.ty.has_param_types() ||
1097 layout.ty.has_self_ty() ||
1098 !self.param_env.caller_bounds.is_empty()
1103 self.record_layout_for_printing_outlined(layout)
1106 fn record_layout_for_printing_outlined(self, layout: TyLayout<'tcx>) {
1107 // (delay format until we actually need it)
1108 let record = |kind, packed, opt_discr_size, variants| {
1109 let type_desc = format!("{:?}", layout.ty);
1110 self.tcx.sess.code_stats.borrow_mut().record_type_size(kind,
1119 let adt_def = match layout.ty.sty {
1120 ty::TyAdt(ref adt_def, _) => {
1121 debug!("print-type-size t: `{:?}` process adt", layout.ty);
1125 ty::TyClosure(..) => {
1126 debug!("print-type-size t: `{:?}` record closure", layout.ty);
1127 record(DataTypeKind::Closure, false, None, vec![]);
1132 debug!("print-type-size t: `{:?}` skip non-nominal", layout.ty);
1137 let adt_kind = adt_def.adt_kind();
1138 let adt_packed = adt_def.repr.packed();
1140 let build_variant_info = |n: Option<ast::Name>,
1142 layout: TyLayout<'tcx>| {
1143 let mut min_size = Size::from_bytes(0);
1144 let field_info: Vec<_> = flds.iter().enumerate().map(|(i, &name)| {
1145 match layout.field(self, i) {
1147 bug!("no layout found for field {}: `{:?}`", name, err);
1149 Ok(field_layout) => {
1150 let offset = layout.fields.offset(i);
1151 let field_end = offset + field_layout.size;
1152 if min_size < field_end {
1153 min_size = field_end;
1155 session::FieldInfo {
1156 name: name.to_string(),
1157 offset: offset.bytes(),
1158 size: field_layout.size.bytes(),
1159 align: field_layout.align.abi(),
1165 session::VariantInfo {
1166 name: n.map(|n| n.to_string()),
1167 kind: if layout.is_unsized() {
1168 session::SizeKind::Min
1170 session::SizeKind::Exact
1172 align: layout.align.abi(),
1173 size: if min_size.bytes() == 0 {
1182 match layout.variants {
1183 Variants::Single { index } => {
1184 debug!("print-type-size `{:#?}` variant {}",
1185 layout, adt_def.variants[index].name);
1186 if !adt_def.variants.is_empty() {
1187 let variant_def = &adt_def.variants[index];
1188 let fields: Vec<_> =
1189 variant_def.fields.iter().map(|f| f.name).collect();
1190 record(adt_kind.into(),
1193 vec![build_variant_info(Some(variant_def.name),
1197 // (This case arises for *empty* enums; so give it
1199 record(adt_kind.into(), adt_packed, None, vec![]);
1203 Variants::NicheFilling { .. } |
1204 Variants::Tagged { .. } => {
1205 debug!("print-type-size `{:#?}` adt general variants def {}",
1206 layout.ty, adt_def.variants.len());
1207 let variant_infos: Vec<_> =
1208 adt_def.variants.iter().enumerate().map(|(i, variant_def)| {
1209 let fields: Vec<_> =
1210 variant_def.fields.iter().map(|f| f.name).collect();
1211 build_variant_info(Some(variant_def.name),
1213 layout.for_variant(self, i))
1216 record(adt_kind.into(), adt_packed, match layout.variants {
1217 Variants::Tagged { ref discr, .. } => Some(discr.value.size(self)),
1225 /// Type size "skeleton", i.e. the only information determining a type's size.
1226 /// While this is conservative (aside from constant sizes, only pointers,
1227 /// newtypes thereof, and null-pointer-optimized enums are allowed), it is
1228 /// enough to statically check common use cases of transmute.
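/// E.g. when `T` is a type parameter that may be unsized, both `&T` and
/// `Option<&T>` reduce to a `Pointer` skeleton with tail `T`, so a transmute
/// between them can still be size-checked.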
1229 #[derive(Copy, Clone, Debug)]
1230 pub enum SizeSkeleton<'tcx> {
1231 /// Any statically computable Layout.
1234 /// A potentially-fat pointer.
1236 /// If true, this pointer is never null.
1238 /// The type which determines the unsized metadata, if any,
1239 /// of this pointer. Either a type parameter or a projection
1240 /// depending on one, with regions erased.
1245 impl<'a, 'tcx> SizeSkeleton<'tcx> {
1246 pub fn compute(ty: Ty<'tcx>,
1247 tcx: TyCtxt<'a, 'tcx, 'tcx>,
1248 param_env: ty::ParamEnv<'tcx>)
1249 -> Result<SizeSkeleton<'tcx>, LayoutError<'tcx>> {
1250 assert!(!ty.has_infer_types());
1252 // First try computing a static layout.
1253 let err = match tcx.layout_of(param_env.and(ty)) {
1255 return Ok(SizeSkeleton::Known(layout.size));
1261 ty::TyRef(_, ty::TypeAndMut { ty: pointee, .. }) |
1262 ty::TyRawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
1263 let non_zero = !ty.is_unsafe_ptr();
1264 let tail = tcx.struct_tail(pointee);
1266 ty::TyParam(_) | ty::TyProjection(_) => {
1267 assert!(tail.has_param_types() || tail.has_self_ty());
1268 Ok(SizeSkeleton::Pointer {
1270 tail: tcx.erase_regions(&tail)
1274 bug!("SizeSkeleton::compute({}): layout errored ({}), yet \
1275 tail `{}` is not a type parameter or a projection",
1281 ty::TyAdt(def, substs) => {
1282 // Only newtypes and enums w/ nullable pointer optimization.
1283 if def.is_union() || def.variants.is_empty() || def.variants.len() > 2 {
1287 // Get a zero-sized variant or a pointer newtype.
1288 let zero_or_ptr_variant = |i: usize| {
1289 let fields = def.variants[i].fields.iter().map(|field| {
1290 SizeSkeleton::compute(field.ty(tcx, substs), tcx, param_env)
1293 for field in fields {
1296 SizeSkeleton::Known(size) => {
1297 if size.bytes() > 0 {
1301 SizeSkeleton::Pointer {..} => {
1312 let v0 = zero_or_ptr_variant(0)?;
1314 if def.variants.len() == 1 {
1315 if let Some(SizeSkeleton::Pointer { non_zero, tail }) = v0 {
1316 return Ok(SizeSkeleton::Pointer {
1317 non_zero: non_zero ||
1318 Some(def.did) == tcx.lang_items().non_zero(),
1326 let v1 = zero_or_ptr_variant(1)?;
1327 // Nullable pointer enum optimization.
1329 (Some(SizeSkeleton::Pointer { non_zero: true, tail }), None) |
1330 (None, Some(SizeSkeleton::Pointer { non_zero: true, tail })) => {
1331 Ok(SizeSkeleton::Pointer {
1340 ty::TyProjection(_) | ty::TyAnon(..) => {
1341 let normalized = tcx.normalize_erasing_regions(param_env, ty);
1342 if ty == normalized {
1345 SizeSkeleton::compute(normalized, tcx, param_env)
1353 pub fn same_size(self, other: SizeSkeleton) -> bool {
1354 match (self, other) {
1355 (SizeSkeleton::Known(a), SizeSkeleton::Known(b)) => a == b,
1356 (SizeSkeleton::Pointer { tail: a, .. },
1357 SizeSkeleton::Pointer { tail: b, .. }) => a == b,
1363 pub trait HasTyCtxt<'tcx>: HasDataLayout {
1364 fn tcx<'a>(&'a self) -> TyCtxt<'a, 'tcx, 'tcx>;
1367 impl<'a, 'gcx, 'tcx> HasDataLayout for TyCtxt<'a, 'gcx, 'tcx> {
1368 fn data_layout(&self) -> &TargetDataLayout {
1373 impl<'a, 'gcx, 'tcx> HasTyCtxt<'gcx> for TyCtxt<'a, 'gcx, 'tcx> {
1374 fn tcx<'b>(&'b self) -> TyCtxt<'b, 'gcx, 'gcx> {
1379 impl<'tcx, T: HasDataLayout> HasDataLayout for LayoutCx<'tcx, T> {
1380 fn data_layout(&self) -> &TargetDataLayout {
1381 self.tcx.data_layout()
1385 impl<'gcx, 'tcx, T: HasTyCtxt<'gcx>> HasTyCtxt<'gcx> for LayoutCx<'tcx, T> {
1386 fn tcx<'b>(&'b self) -> TyCtxt<'b, 'gcx, 'gcx> {
1391 pub trait MaybeResult<T> {
1392 fn from_ok(x: T) -> Self;
1393 fn map_same<F: FnOnce(T) -> T>(self, f: F) -> Self;
1396 impl<T> MaybeResult<T> for T {
1397 fn from_ok(x: T) -> Self {
1400 fn map_same<F: FnOnce(T) -> T>(self, f: F) -> Self {
1405 impl<T, E> MaybeResult<T> for Result<T, E> {
1406 fn from_ok(x: T) -> Self {
1409 fn map_same<F: FnOnce(T) -> T>(self, f: F) -> Self {
1414 pub type TyLayout<'tcx> = ::rustc_target::abi::TyLayout<'tcx, Ty<'tcx>>;
1416 impl<'a, 'tcx> LayoutOf for LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> {
1418 type TyLayout = Result<TyLayout<'tcx>, LayoutError<'tcx>>;
1420 /// Computes the layout of a type. Note that this implicitly
1421 /// executes in "reveal all" mode.
1422 fn layout_of(self, ty: Ty<'tcx>) -> Self::TyLayout {
1423 let param_env = self.param_env.with_reveal_all();
1424 let ty = self.tcx.normalize_erasing_regions(param_env, ty);
1425 let details = self.tcx.layout_raw(param_env.and(ty))?;
1426 let layout = TyLayout {
1431 // NB: This recording is normally disabled; when enabled, it
1432 // can however trigger recursive invocations of `layout_of`.
1433 // Therefore, we execute it *after* the main query has
1434 // completed, to avoid problems around recursive structures
1435 // and the like. (Admittedly, I wasn't able to reproduce a problem
1436 // here, but it seems like the right thing to do. -nmatsakis)
1437 self.record_layout_for_printing(layout);
1443 impl<'a, 'tcx> LayoutOf for LayoutCx<'tcx, ty::maps::TyCtxtAt<'a, 'tcx, 'tcx>> {
1445 type TyLayout = Result<TyLayout<'tcx>, LayoutError<'tcx>>;
1447 /// Computes the layout of a type. Note that this implicitly
1448 /// executes in "reveal all" mode.
1449 fn layout_of(self, ty: Ty<'tcx>) -> Self::TyLayout {
1450 let param_env = self.param_env.with_reveal_all();
1451 let ty = self.tcx.normalize_erasing_regions(param_env, ty);
1452 let details = self.tcx.layout_raw(param_env.and(ty))?;
1453 let layout = TyLayout {
1458 // NB: This recording is normally disabled; when enabled, it
1459 // can however trigger recursive invocations of `layout_of`.
1460 // Therefore, we execute it *after* the main query has
1461 // completed, to avoid problems around recursive structures
1462 // and the like. (Admittedly, I wasn't able to reproduce a problem
1463 // here, but it seems like the right thing to do. -nmatsakis)
1466 param_env: self.param_env
1468 cx.record_layout_for_printing(layout);
1474 // Helper (inherent) `layout_of` methods to avoid pushing `LayoutCx` to users.
1475 impl<'a, 'tcx> TyCtxt<'a, 'tcx, 'tcx> {
1476 /// Computes the layout of a type. Note that this implicitly
1477 /// executes in "reveal all" mode.
1479 pub fn layout_of(self, param_env_and_ty: ty::ParamEnvAnd<'tcx, Ty<'tcx>>)
1480 -> Result<TyLayout<'tcx>, LayoutError<'tcx>> {
1483 param_env: param_env_and_ty.param_env
1485 cx.layout_of(param_env_and_ty.value)
1489 impl<'a, 'tcx> ty::maps::TyCtxtAt<'a, 'tcx, 'tcx> {
1490 /// Computes the layout of a type. Note that this implicitly
1491 /// executes in "reveal all" mode.
1493 pub fn layout_of(self, param_env_and_ty: ty::ParamEnvAnd<'tcx, Ty<'tcx>>)
1494 -> Result<TyLayout<'tcx>, LayoutError<'tcx>> {
1497 param_env: param_env_and_ty.param_env
1499 cx.layout_of(param_env_and_ty.value)
1503 impl<'a, 'tcx, C> TyLayoutMethods<'tcx, C> for Ty<'tcx>
1504 where C: LayoutOf<Ty = Ty<'tcx>> + HasTyCtxt<'tcx>,
1505 C::TyLayout: MaybeResult<TyLayout<'tcx>>
1507 fn for_variant(this: TyLayout<'tcx>, cx: C, variant_index: usize) -> TyLayout<'tcx> {
1508 let details = match this.variants {
1509 Variants::Single { index } if index == variant_index => this.details,
1511 Variants::Single { index } => {
1512 // Deny calling for_variant more than once for non-Single enums.
1513 cx.layout_of(this.ty).map_same(|layout| {
1514 assert_eq!(layout.variants, Variants::Single { index });
1518 let fields = match this.ty.sty {
1519 ty::TyAdt(def, _) => def.variants[variant_index].fields.len(),
1522 let mut details = LayoutDetails::uninhabited(fields);
1523 details.variants = Variants::Single { index: variant_index };
1524 cx.tcx().intern_layout(details)
1527 Variants::NicheFilling { ref variants, .. } |
1528 Variants::Tagged { ref variants, .. } => {
1529 &variants[variant_index]
1533 assert_eq!(details.variants, Variants::Single { index: variant_index });
1541 fn field(this: TyLayout<'tcx>, cx: C, i: usize) -> C::TyLayout {
1543 cx.layout_of(match this.ty.sty {
1552 ty::TyGeneratorWitness(..) |
1554 ty::TyDynamic(..) => {
1555 bug!("TyLayout::field_type({:?}): not applicable", this)
1558 // Potentially-fat pointers.
1559 ty::TyRef(_, ty::TypeAndMut { ty: pointee, .. }) |
1560 ty::TyRawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
1563 // Reuse the fat *T type as its own thin pointer data field.
1564 // This provides information about e.g. DST struct pointees
1565 // (which may have no non-DST form), and will work as long
1566 // as the `Abi` or `FieldPlacement` is checked by users.
1568 let nil = tcx.mk_nil();
1569 let ptr_ty = if this.ty.is_unsafe_ptr() {
1572 tcx.mk_mut_ref(tcx.types.re_static, nil)
1574 return cx.layout_of(ptr_ty).map_same(|mut ptr_layout| {
1575 ptr_layout.ty = this.ty;
1580 match tcx.struct_tail(pointee).sty {
1582 ty::TyStr => tcx.types.usize,
1583 ty::TyDynamic(..) => {
1584 // FIXME(eddyb) use an usize/fn() array with
1585 // the correct number of vtables slots.
1586 tcx.mk_imm_ref(tcx.types.re_static, tcx.mk_nil())
1588 _ => bug!("TyLayout::field_type({:?}): not applicable", this)
1592 // Arrays and slices.
1593 ty::TyArray(element, _) |
1594 ty::TySlice(element) => element,
1595 ty::TyStr => tcx.types.u8,
1597 // Tuples, generators and closures.
1598 ty::TyClosure(def_id, ref substs) => {
1599 substs.upvar_tys(def_id, tcx).nth(i).unwrap()
1602 ty::TyGenerator(def_id, ref substs, _) => {
1603 substs.field_tys(def_id, tcx).nth(i).unwrap()
1606 ty::TyTuple(tys) => tys[i],
1608 // SIMD vector types.
1609 ty::TyAdt(def, ..) if def.repr.simd() => {
1610 this.ty.simd_type(tcx)
1614 ty::TyAdt(def, substs) => {
1615 match this.variants {
1616 Variants::Single { index } => {
1617 def.variants[index].fields[i].ty(tcx, substs)
1620 // Discriminant field for enums (where applicable).
1621 Variants::Tagged { ref discr, .. } |
1622 Variants::NicheFilling { niche: ref discr, .. } => {
1624 let layout = LayoutDetails::scalar(tcx, discr.clone());
1625 return MaybeResult::from_ok(TyLayout {
1626 details: tcx.intern_layout(layout),
1627 ty: discr.value.to_ty(tcx)
1633 ty::TyProjection(_) | ty::TyAnon(..) | ty::TyParam(_) |
1634 ty::TyInfer(_) | ty::TyError => {
1635 bug!("TyLayout::field_type: unexpected type `{}`", this.ty)
1641 impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> {
1642 /// Find the offset of a niche leaf field that has at least
1643 /// `count` consecutive invalid values, starting from the given
1644 /// type and recursing through aggregates.
1645 /// The tuple is `(offset, scalar, niche_value)`.
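/// E.g. `bool` has valid_range 0..=1 out of 256 `u8` values, so it offers
/// 254 niches starting at 2.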
1646 // FIXME(eddyb) traverse already optimized enums.
1647 fn find_niche(self, layout: TyLayout<'tcx>, count: u128)
1648 -> Result<Option<(Size, Scalar, u128)>, LayoutError<'tcx>>
1650 let scalar_component = |scalar: &Scalar, offset| {
1651 let Scalar { value, valid_range: ref v } = *scalar;
1653 let bits = value.size(self).bits();
1654 assert!(bits <= 128);
1655 let max_value = !0u128 >> (128 - bits);
1657 // Find out how many values are outside the valid range.
1658 let niches = if v.start() <= v.end() {
1659 v.start() + (max_value - v.end())
1661 v.start() - v.end() - 1
1664 // Give up if we can't fit `count` consecutive niches.
1669 let niche_start = v.end().wrapping_add(1) & max_value;
1670 let niche_end = v.end().wrapping_add(count) & max_value;
1671 Some((offset, Scalar {
1673 valid_range: *v.start()..=niche_end
1677 // Local variables which live across yields are stored
1678 // in the generator type as fields. These may be uninitialized
1679 // so we don't look for niches there.
1680 if let ty::TyGenerator(..) = layout.ty.sty {
1685 Abi::Scalar(ref scalar) => {
1686 return Ok(scalar_component(scalar, Size::from_bytes(0)));
1688 Abi::ScalarPair(ref a, ref b) => {
1689 return Ok(scalar_component(a, Size::from_bytes(0)).or_else(|| {
1690 scalar_component(b, a.value.size(self).abi_align(b.value.align(self)))
1693 Abi::Vector { ref element, .. } => {
1694 return Ok(scalar_component(element, Size::from_bytes(0)));
1699 // Perhaps one of the fields is non-zero, let's recurse and find out.
1700 if let FieldPlacement::Union(_) = layout.fields {
1701 // Only Rust enums have safe-to-inspect fields
1702 // (a discriminant), other unions are unsafe.
1703 if let Variants::Single { .. } = layout.variants {
1707 if let FieldPlacement::Array { .. } = layout.fields {
1708 if layout.fields.count() > 0 {
1709 return self.find_niche(layout.field(self, 0)?, count);
1712 for i in 0..layout.fields.count() {
1713 let r = self.find_niche(layout.field(self, i)?, count)?;
1714 if let Some((offset, scalar, niche_value)) = r {
1715 let offset = layout.fields.offset(i) + offset;
1716 return Ok(Some((offset, scalar, niche_value)));
1723 impl<'a> HashStable<StableHashingContext<'a>> for Variants {
1724 fn hash_stable<W: StableHasherResult>(&self,
1725 hcx: &mut StableHashingContext<'a>,
1726 hasher: &mut StableHasher<W>) {
1727 use ty::layout::Variants::*;
1728 mem::discriminant(self).hash_stable(hcx, hasher);
1731 Single { index } => {
1732 index.hash_stable(hcx, hasher);
1738 discr.hash_stable(hcx, hasher);
1739 variants.hash_stable(hcx, hasher);
1748 dataful_variant.hash_stable(hcx, hasher);
1749 niche_variants.start().hash_stable(hcx, hasher);
1750 niche_variants.end().hash_stable(hcx, hasher);
1751 niche.hash_stable(hcx, hasher);
1752 niche_start.hash_stable(hcx, hasher);
1753 variants.hash_stable(hcx, hasher);
1759 impl<'a> HashStable<StableHashingContext<'a>> for FieldPlacement {
1760 fn hash_stable<W: StableHasherResult>(&self,
1761 hcx: &mut StableHashingContext<'a>,
1762 hasher: &mut StableHasher<W>) {
1763 use ty::layout::FieldPlacement::*;
1764 mem::discriminant(self).hash_stable(hcx, hasher);
1768 count.hash_stable(hcx, hasher);
1770 Array { count, stride } => {
1771 count.hash_stable(hcx, hasher);
1772 stride.hash_stable(hcx, hasher);
1774 Arbitrary { ref offsets, ref memory_index } => {
1775 offsets.hash_stable(hcx, hasher);
1776 memory_index.hash_stable(hcx, hasher);
1782 impl<'a> HashStable<StableHashingContext<'a>> for Abi {
1783 fn hash_stable<W: StableHasherResult>(&self,
1784 hcx: &mut StableHashingContext<'a>,
1785 hasher: &mut StableHasher<W>) {
1786 use ty::layout::Abi::*;
1787 mem::discriminant(self).hash_stable(hcx, hasher);
1791 Scalar(ref value) => {
1792 value.hash_stable(hcx, hasher);
1794 ScalarPair(ref a, ref b) => {
1795 a.hash_stable(hcx, hasher);
1796 b.hash_stable(hcx, hasher);
1798 Vector { ref element, count } => {
1799 element.hash_stable(hcx, hasher);
1800 count.hash_stable(hcx, hasher);
1802 Aggregate { sized } => {
1803 sized.hash_stable(hcx, hasher);
1809 impl<'a> HashStable<StableHashingContext<'a>> for Scalar {
1810 fn hash_stable<W: StableHasherResult>(&self,
1811 hcx: &mut StableHashingContext<'a>,
1812 hasher: &mut StableHasher<W>) {
1813 let Scalar { value, ref valid_range } = *self;
1814 value.hash_stable(hcx, hasher);
1815 valid_range.start().hash_stable(hcx, hasher);
1816 valid_range.end().hash_stable(hcx, hasher);
1820 impl_stable_hash_for!(struct ::ty::layout::LayoutDetails {
1828 impl_stable_hash_for!(enum ::ty::layout::Integer {
1836 impl_stable_hash_for!(enum ::ty::layout::Primitive {
1837 Int(integer, signed),
1843 impl<'gcx> HashStable<StableHashingContext<'gcx>> for Align {
1844 fn hash_stable<W: StableHasherResult>(&self,
1845 hcx: &mut StableHashingContext<'gcx>,
1846 hasher: &mut StableHasher<W>) {
1847 self.abi().hash_stable(hcx, hasher);
1848 self.pref().hash_stable(hcx, hasher);
1852 impl<'gcx> HashStable<StableHashingContext<'gcx>> for Size {
1853 fn hash_stable<W: StableHasherResult>(&self,
1854 hcx: &mut StableHashingContext<'gcx>,
1855 hasher: &mut StableHasher<W>) {
1856 self.bytes().hash_stable(hcx, hasher);
1860 impl<'a, 'gcx> HashStable<StableHashingContext<'a>> for LayoutError<'gcx>
1862 fn hash_stable<W: StableHasherResult>(&self,
1863 hcx: &mut StableHashingContext<'a>,
1864 hasher: &mut StableHasher<W>) {
1865 use ty::layout::LayoutError::*;
1866 mem::discriminant(self).hash_stable(hcx, hasher);
1870 SizeOverflow(t) => t.hash_stable(hcx, hasher)