// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use session::{self, DataTypeKind};
use ty::{self, Ty, TyCtxt, TypeFoldable, ReprOptions};

use syntax::ast::{self, IntTy, UintTy};
use syntax::attr;
use syntax_pos::DUMMY_SP;

use std::cmp;
use std::fmt;
use std::iter;
use std::mem;
use std::ops::Bound;

use ich::StableHashingContext;
use rustc_data_structures::indexed_vec::{IndexVec, Idx};
use rustc_data_structures::stable_hasher::{HashStable, StableHasher,
                                           StableHasherResult};

pub use rustc_target::abi::*;
pub trait IntegerExt {
    fn to_ty<'a, 'tcx>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, signed: bool) -> Ty<'tcx>;
    fn from_attr<C: HasDataLayout>(cx: &C, ity: attr::IntType) -> Integer;
    fn repr_discr<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
                            ty: Ty<'tcx>,
                            repr: &ReprOptions,
                            min: i128,
                            max: i128)
                            -> (Integer, bool);
}

impl IntegerExt for Integer {
    fn to_ty<'a, 'tcx>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, signed: bool) -> Ty<'tcx> {
        match (*self, signed) {
            (I8, false) => tcx.types.u8,
            (I16, false) => tcx.types.u16,
            (I32, false) => tcx.types.u32,
            (I64, false) => tcx.types.u64,
            (I128, false) => tcx.types.u128,
            (I8, true) => tcx.types.i8,
            (I16, true) => tcx.types.i16,
            (I32, true) => tcx.types.i32,
            (I64, true) => tcx.types.i64,
            (I128, true) => tcx.types.i128,
        }
    }
    /// Get the Integer type from an attr::IntType.
    fn from_attr<C: HasDataLayout>(cx: &C, ity: attr::IntType) -> Integer {
        let dl = cx.data_layout();

        match ity {
            attr::SignedInt(IntTy::I8) | attr::UnsignedInt(UintTy::U8) => I8,
            attr::SignedInt(IntTy::I16) | attr::UnsignedInt(UintTy::U16) => I16,
            attr::SignedInt(IntTy::I32) | attr::UnsignedInt(UintTy::U32) => I32,
            attr::SignedInt(IntTy::I64) | attr::UnsignedInt(UintTy::U64) => I64,
            attr::SignedInt(IntTy::I128) | attr::UnsignedInt(UintTy::U128) => I128,
            attr::SignedInt(IntTy::Isize) | attr::UnsignedInt(UintTy::Usize) => {
                dl.ptr_sized_integer()
            }
        }
    }
    /// Find the appropriate Integer type and signedness for the given
    /// signed discriminant range and #[repr] attribute.
    /// N.B.: u128 values above i128::MAX will be treated as signed, but
    /// that shouldn't affect anything other than maybe debuginfo.
    fn repr_discr<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
                            ty: Ty<'tcx>,
                            repr: &ReprOptions,
                            min: i128,
                            max: i128)
                            -> (Integer, bool) {
        // Theoretically, negative values could be larger in unsigned representation
        // than the unsigned representation of the signed minimum. However, if there
        // are any negative values, the only valid unsigned representation is u128
        // which can fit all i128 values, so the result remains unaffected.
        let unsigned_fit = Integer::fit_unsigned(cmp::max(min as u128, max as u128));
        let signed_fit = cmp::max(Integer::fit_signed(min), Integer::fit_signed(max));

        let mut min_from_extern = None;
        let min_default = I8;

        if let Some(ity) = repr.int {
            let discr = Integer::from_attr(&tcx, ity);
            let fit = if ity.is_signed() { signed_fit } else { unsigned_fit };
            if discr < fit {
                bug!("Integer::repr_discr: `#[repr]` hint too small for \
                      discriminant range of enum `{}`", ty)
            }
            return (discr, ity.is_signed());
        }
        if repr.c() {
            match &tcx.sess.target.target.arch[..] {
                // WARNING: the ARM EABI has two variants; the one corresponding
                // to `at_least == I32` appears to be used on Linux and NetBSD,
                // but some systems may use the variant corresponding to no
                // lower bound. However, we don't run on those yet...?
                "arm" => min_from_extern = Some(I32),
                _ => min_from_extern = Some(I32),
            }
        }

        let at_least = min_from_extern.unwrap_or(min_default);

        // If there are no negative values, we can use the unsigned fit.
        if min >= 0 {
            (cmp::max(unsigned_fit, at_least), false)
        } else {
            (cmp::max(signed_fit, at_least), true)
        }
    }
}
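// A hedged, standalone illustration of the fitting logic above (not part of
// rustc itself; the enum names are made up). An explicit `#[repr]` hint fixes
// the tag size, while an inferred tag uses the smallest integer that fits the
// signed discriminant range.
#[cfg(test)]
mod repr_discr_examples {
    use std::mem::size_of;

    #[repr(i32)]
    enum Hinted { A = -1, B = 1 }       // range fits I8, but the hint forces I32

    enum Inferred { A = -1, B = 200 }   // -1 needs a signed type; 200 > i8::MAX, so i16

    #[test]
    fn discriminant_sizes() {
        assert_eq!(size_of::<Hinted>(), 4);
        assert_eq!(size_of::<Inferred>(), 2);
    }
}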
pub trait PrimitiveExt {
    fn to_ty<'a, 'tcx>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>) -> Ty<'tcx>;
}

impl PrimitiveExt for Primitive {
    fn to_ty<'a, 'tcx>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>) -> Ty<'tcx> {
        match *self {
            Int(i, signed) => i.to_ty(tcx, signed),
            Float(FloatTy::F32) => tcx.types.f32,
            Float(FloatTy::F64) => tcx.types.f64,
            Pointer => tcx.mk_mut_ptr(tcx.mk_unit()),
        }
    }
}
/// The first half of a fat pointer.
///
/// - For a trait object, this is the address of the box.
/// - For a slice, this is the base address.
pub const FAT_PTR_ADDR: usize = 0;

/// The second half of a fat pointer.
///
/// - For a trait object, this is the address of the vtable.
/// - For a slice, this is the length.
pub const FAT_PTR_EXTRA: usize = 1;
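// A hedged sketch (not rustc internals) of what these two constants describe:
// a slice reference is laid out as a (data pointer, length) pair in that
// order, so index 0 is the address and index 1 is the extra metadata.
#[cfg(test)]
mod fat_ptr_examples {
    use std::mem;

    #[test]
    fn slice_parts() {
        let s: &[u8] = &[1, 2, 3];
        let (addr, extra): (usize, usize) = unsafe { mem::transmute(s) };
        assert_eq!(addr, s.as_ptr() as usize); // FAT_PTR_ADDR: the base address
        assert_eq!(extra, s.len());            // FAT_PTR_EXTRA: the length
    }
}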
#[derive(Copy, Clone, Debug, RustcEncodable, RustcDecodable)]
pub enum LayoutError<'tcx> {
    Unknown(Ty<'tcx>),
    SizeOverflow(Ty<'tcx>)
}

impl<'tcx> fmt::Display for LayoutError<'tcx> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match *self {
            LayoutError::Unknown(ty) => {
                write!(f, "the type `{:?}` has an unknown layout", ty)
            }
            LayoutError::SizeOverflow(ty) => {
                write!(f, "the type `{:?}` is too big for the current architecture", ty)
            }
        }
    }
}
fn layout_raw<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
                        query: ty::ParamEnvAnd<'tcx, Ty<'tcx>>)
                        -> Result<&'tcx LayoutDetails, LayoutError<'tcx>>
{
    ty::tls::with_related_context(tcx, move |icx| {
        let rec_limit = *tcx.sess.recursion_limit.get();
        let (param_env, ty) = query.into_parts();

        if icx.layout_depth > rec_limit {
            tcx.sess.fatal(
                &format!("overflow representing the type `{}`", ty));
        }

        // Update the ImplicitCtxt to increase the layout_depth
        let icx = ty::tls::ImplicitCtxt {
            layout_depth: icx.layout_depth + 1,
            ..icx.clone()
        };

        ty::tls::enter_context(&icx, |_| {
            let cx = LayoutCx { tcx, param_env };
            cx.layout_raw_uncached(ty)
        })
    })
}
pub fn provide(providers: &mut ty::query::Providers<'_>) {
    *providers = ty::query::Providers {
        layout_raw,
        ..*providers
    };
}

pub struct LayoutCx<'tcx, C> {
    pub tcx: C,
    pub param_env: ty::ParamEnv<'tcx>
}

impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> {
    fn layout_raw_uncached(&self, ty: Ty<'tcx>)
                           -> Result<&'tcx LayoutDetails, LayoutError<'tcx>> {
        let tcx = self.tcx;
        let param_env = self.param_env;
        let dl = self.data_layout();
        let scalar_unit = |value: Primitive| {
            let bits = value.size(dl).bits();
            assert!(bits <= 128);
            Scalar {
                value,
                valid_range: 0..=(!0 >> (128 - bits))
            }
        };
        let scalar = |value: Primitive| {
            tcx.intern_layout(LayoutDetails::scalar(self, scalar_unit(value)))
        };
        let scalar_pair = |a: Scalar, b: Scalar| {
            let align = a.value.align(dl).max(b.value.align(dl)).max(dl.aggregate_align);
            let b_offset = a.value.size(dl).abi_align(b.value.align(dl));
            let size = (b_offset + b.value.size(dl)).abi_align(align);
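            // Worked example (hedged, assuming the aggregate alignment does
            // not exceed 4): for a = u8 and b = u32, `b_offset` is 1 rounded
            // up to 4, and `size` is (4 + 4) rounded up to the pair's
            // alignment of 4, i.e. 8 bytes.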
            LayoutDetails {
                variants: Variants::Single { index: VariantIdx::new(0) },
                fields: FieldPlacement::Arbitrary {
                    offsets: vec![Size::ZERO, b_offset],
                    memory_index: vec![0, 1]
                },
                abi: Abi::ScalarPair(a, b),
                align,
                size
            }
        };
        #[derive(Copy, Clone, Debug)]
        enum StructKind {
            /// A tuple, closure, or univariant which cannot be coerced to unsized.
            AlwaysSized,
            /// A univariant, the last field of which may be coerced to unsized.
            MaybeUnsized,
            /// A univariant, but with a prefix of an arbitrary size & alignment (e.g. enum tag).
            Prefixed(Size, Align),
        }
        let univariant_uninterned = |fields: &[TyLayout<'_>], repr: &ReprOptions, kind| {
            let packed = repr.packed();
            if packed && repr.align > 0 {
                bug!("struct cannot be packed and aligned");
            }

            let pack = {
                let pack = repr.pack as u64;
                Align::from_bytes(pack, pack).unwrap()
            };

            let mut align = if packed {
                dl.i8_align
            } else {
                dl.aggregate_align
            };

            let mut sized = true;
            let mut offsets = vec![Size::ZERO; fields.len()];
            let mut inverse_memory_index: Vec<u32> = (0..fields.len() as u32).collect();

            let mut optimize = !repr.inhibit_struct_field_reordering_opt();
            if let StructKind::Prefixed(_, align) = kind {
                optimize &= align.abi() == 1;
            }

            if optimize {
                let end = if let StructKind::MaybeUnsized = kind {
                    fields.len() - 1
                } else {
                    fields.len()
                };
                let optimizing = &mut inverse_memory_index[..end];
                let field_align = |f: &TyLayout<'_>| {
                    if packed { f.align.min(pack).abi() } else { f.align.abi() }
                };
                match kind {
                    StructKind::AlwaysSized |
                    StructKind::MaybeUnsized => {
                        optimizing.sort_by_key(|&x| {
                            // Place ZSTs first to avoid "interesting offsets",
                            // especially with only one or two non-ZST fields.
                            let f = &fields[x as usize];
                            (!f.is_zst(), cmp::Reverse(field_align(f)))
                        });
                    }
                    StructKind::Prefixed(..) => {
                        optimizing.sort_by_key(|&x| field_align(&fields[x as usize]));
                    }
                }
            }
            // inverse_memory_index holds field indices by increasing memory offset.
            // That is, if field 5 has offset 0, the first element of inverse_memory_index is 5.
            // We now write field offsets to the corresponding offset slot;
            // field 5 with offset 0 puts 0 in offsets[5].
            // At the bottom of this function, we use inverse_memory_index to produce memory_index.

            let mut offset = Size::ZERO;
            if let StructKind::Prefixed(prefix_size, prefix_align) = kind {
                if packed {
                    let prefix_align = prefix_align.min(pack);
                    align = align.max(prefix_align);
                } else {
                    align = align.max(prefix_align);
                }
                offset = prefix_size.abi_align(prefix_align);
            }
            for &i in &inverse_memory_index {
                let field = fields[i as usize];
                if !sized {
                    bug!("univariant: field #{} of `{}` comes after unsized field",
                         offsets.len(), ty);
                }

                if field.is_unsized() {
                    sized = false;
                }

                // Invariant: offset < dl.obj_size_bound() <= 1<<61
                if packed {
                    let field_pack = field.align.min(pack);
                    offset = offset.abi_align(field_pack);
                    align = align.max(field_pack);
                } else {
                    offset = offset.abi_align(field.align);
                    align = align.max(field.align);
                }

                debug!("univariant offset: {:?} field: {:#?}", offset, field);
                offsets[i as usize] = offset;

                offset = offset.checked_add(field.size, dl)
                    .ok_or(LayoutError::SizeOverflow(ty))?;
            }
            if repr.align > 0 {
                let repr_align = repr.align as u64;
                align = align.max(Align::from_bytes(repr_align, repr_align).unwrap());
                debug!("univariant repr_align: {:?}", repr_align);
            }

            debug!("univariant min_size: {:?}", offset);
            let min_size = offset;
            // As stated above, inverse_memory_index holds field indices by increasing offset.
            // This makes it an already-sorted view of the offsets vec.
            // To invert it, consider:
            // If field 5 has offset 0, offsets[0] is 5, and memory_index[5] should be 0.
            // Field 5 would be the first element, so memory_index is i:
            // Note: if we didn't optimize, it's already right.
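            // A hedged, standalone sketch of this inversion (the helper name
            // is made up, not rustc code):
            //
            //     fn invert(inverse: &[u32]) -> Vec<u32> {
            //         let mut memory_index = vec![0u32; inverse.len()];
            //         for (pos, &field) in inverse.iter().enumerate() {
            //             memory_index[field as usize] = pos as u32;
            //         }
            //         memory_index
            //     }
            //
            //     // If field 5 is first in memory: inverse = [5, 0, 1, 2, 3, 4]
            //     assert_eq!(invert(&[5, 0, 1, 2, 3, 4]), vec![1, 2, 3, 4, 5, 0]);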
            let mut memory_index;
            if optimize {
                memory_index = vec![0; inverse_memory_index.len()];

                for i in 0..inverse_memory_index.len() {
                    memory_index[inverse_memory_index[i] as usize] = i as u32;
                }
            } else {
                memory_index = inverse_memory_index;
            }
            let size = min_size.abi_align(align);
            let mut abi = Abi::Aggregate { sized };

            // Unpack newtype ABIs and find scalar pairs.
            if sized && size.bytes() > 0 {
                // All other fields must be ZSTs, and we need them to all start at 0.
                let mut zst_offsets =
                    offsets.iter().enumerate().filter(|&(i, _)| fields[i].is_zst());
                if zst_offsets.all(|(_, o)| o.bytes() == 0) {
                    let mut non_zst_fields =
                        fields.iter().enumerate().filter(|&(_, f)| !f.is_zst());

                    match (non_zst_fields.next(), non_zst_fields.next(), non_zst_fields.next()) {
                        // We have exactly one non-ZST field.
                        (Some((i, field)), None, None) => {
                            // Field fills the struct and it has a scalar or scalar pair ABI.
                            if offsets[i].bytes() == 0 &&
                               align.abi() == field.align.abi() &&
                               size == field.size {
                                match field.abi {
                                    // For plain scalars, or vectors of them, we can't unpack
                                    // newtypes for `#[repr(C)]`, as that affects C ABIs.
                                    Abi::Scalar(_) | Abi::Vector { .. } if optimize => {
                                        abi = field.abi.clone();
                                    }
                                    // But scalar pairs are Rust-specific and get
                                    // treated as aggregates by C ABIs anyway.
                                    Abi::ScalarPair(..) => {
                                        abi = field.abi.clone();
                                    }
                                    _ => {}
                                }
                            }
                        }
                        // Two non-ZST fields, and they're both scalars.
                        (Some((i, &TyLayout {
                            details: &LayoutDetails { abi: Abi::Scalar(ref a), .. }, ..
                        })), Some((j, &TyLayout {
                            details: &LayoutDetails { abi: Abi::Scalar(ref b), .. }, ..
                        })), None) => {
                            // Order by the memory placement, not source order.
                            let ((i, a), (j, b)) = if offsets[i] < offsets[j] {
                                ((i, a), (j, b))
                            } else {
                                ((j, b), (i, a))
                            };
                            let pair = scalar_pair(a.clone(), b.clone());
                            let pair_offsets = match pair.fields {
                                FieldPlacement::Arbitrary {
                                    ref offsets,
                                    ref memory_index
                                } => {
                                    assert_eq!(memory_index, &[0, 1]);
                                    offsets
                                }
                                _ => bug!()
                            };
                            if offsets[i] == pair_offsets[0] &&
                               offsets[j] == pair_offsets[1] &&
                               align == pair.align &&
                               size == pair.size {
                                // We can use `ScalarPair` only when it matches our
                                // already computed layout (including `#[repr(C)]`).
                                abi = pair.abi;
                            }
                        }
                        _ => {}
                    }
                }
            }
            if sized && fields.iter().any(|f| f.abi.is_uninhabited()) {
                abi = Abi::Uninhabited;
            }

            Ok(LayoutDetails {
                variants: Variants::Single { index: VariantIdx::new(0) },
                fields: FieldPlacement::Arbitrary {
                    offsets,
                    memory_index
                },
                abi,
                align,
                size
            })
        };
        let univariant = |fields: &[TyLayout<'_>], repr: &ReprOptions, kind| {
            Ok(tcx.intern_layout(univariant_uninterned(fields, repr, kind)?))
        };
        debug_assert!(!ty.has_infer_types());

        Ok(match ty.sty {
            // Basic scalars.
            ty::Bool => {
                tcx.intern_layout(LayoutDetails::scalar(self, Scalar {
                    value: Int(I8, false),
                    valid_range: 0..=1
                }))
            }
            ty::Char => {
                tcx.intern_layout(LayoutDetails::scalar(self, Scalar {
                    value: Int(I32, false),
                    valid_range: 0..=0x10FFFF
                }))
            }
            ty::Int(ity) => {
                scalar(Int(Integer::from_attr(dl, attr::SignedInt(ity)), true))
            }
            ty::Uint(ity) => {
                scalar(Int(Integer::from_attr(dl, attr::UnsignedInt(ity)), false))
            }
            ty::Float(fty) => scalar(Float(fty)),
            ty::FnPtr(_) => {
                let mut ptr = scalar_unit(Pointer);
                ptr.valid_range = 1..=*ptr.valid_range.end();
                tcx.intern_layout(LayoutDetails::scalar(self, ptr))
            }
            // The never type.
            ty::Never => {
                tcx.intern_layout(LayoutDetails {
                    variants: Variants::Single { index: VariantIdx::new(0) },
                    fields: FieldPlacement::Union(0),
                    abi: Abi::Uninhabited,
                    align: dl.i8_align,
                    size: Size::ZERO
                })
            }
            // Potentially-fat pointers.
            ty::Ref(_, pointee, _) |
            ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
                let mut data_ptr = scalar_unit(Pointer);
                if !ty.is_unsafe_ptr() {
                    data_ptr.valid_range = 1..=*data_ptr.valid_range.end();
                }

                let pointee = tcx.normalize_erasing_regions(param_env, pointee);
                if pointee.is_sized(tcx.at(DUMMY_SP), param_env) {
                    return Ok(tcx.intern_layout(LayoutDetails::scalar(self, data_ptr)));
                }

                let unsized_part = tcx.struct_tail(pointee);
                let metadata = match unsized_part.sty {
                    ty::Foreign(..) => {
                        return Ok(tcx.intern_layout(LayoutDetails::scalar(self, data_ptr)));
                    }
                    ty::Slice(_) | ty::Str => {
                        scalar_unit(Int(dl.ptr_sized_integer(), false))
                    }
                    ty::Dynamic(..) => {
                        let mut vtable = scalar_unit(Pointer);
                        vtable.valid_range = 1..=*vtable.valid_range.end();
                        vtable
                    }
                    _ => return Err(LayoutError::Unknown(unsized_part))
                };

                // Effectively a (ptr, meta) tuple.
                tcx.intern_layout(scalar_pair(data_ptr, metadata))
            }
            // Arrays and slices.
            ty::Array(element, mut count) => {
                if count.has_projections() {
                    count = tcx.normalize_erasing_regions(param_env, count);
                    if count.has_projections() {
                        return Err(LayoutError::Unknown(ty));
                    }
                }

                let element = self.layout_of(element)?;
                let count = count.unwrap_usize(tcx);
                let size = element.size.checked_mul(count, dl)
                    .ok_or(LayoutError::SizeOverflow(ty))?;

                tcx.intern_layout(LayoutDetails {
                    variants: Variants::Single { index: VariantIdx::new(0) },
                    fields: FieldPlacement::Array {
                        stride: element.size,
                        count
                    },
                    abi: Abi::Aggregate { sized: true },
                    align: element.align,
                    size
                })
            }
            ty::Slice(element) => {
                let element = self.layout_of(element)?;
                tcx.intern_layout(LayoutDetails {
                    variants: Variants::Single { index: VariantIdx::new(0) },
                    fields: FieldPlacement::Array {
                        stride: element.size,
                        count: 0
                    },
                    abi: Abi::Aggregate { sized: false },
                    align: element.align,
                    size: Size::ZERO
                })
            }
            ty::Str => {
                tcx.intern_layout(LayoutDetails {
                    variants: Variants::Single { index: VariantIdx::new(0) },
                    fields: FieldPlacement::Array {
                        stride: Size::from_bytes(1),
                        count: 0
                    },
                    abi: Abi::Aggregate { sized: false },
                    align: dl.i8_align,
                    size: Size::ZERO
                })
            }
            // Odd unit types.
            ty::FnDef(..) => {
                univariant(&[], &ReprOptions::default(), StructKind::AlwaysSized)?
            }
            ty::Dynamic(..) | ty::Foreign(..) => {
                let mut unit = univariant_uninterned(&[], &ReprOptions::default(),
                    StructKind::AlwaysSized)?;
                match unit.abi {
                    Abi::Aggregate { ref mut sized } => *sized = false,
                    _ => bug!()
                }
                tcx.intern_layout(unit)
            }
            // Tuples, generators and closures.
            ty::Generator(def_id, ref substs, _) => {
                let tys = substs.field_tys(def_id, tcx);
                univariant(&tys.map(|ty| self.layout_of(ty)).collect::<Result<Vec<_>, _>>()?,
                    &ReprOptions::default(),
                    StructKind::AlwaysSized)?
            }

            ty::Closure(def_id, ref substs) => {
                let tys = substs.upvar_tys(def_id, tcx);
                univariant(&tys.map(|ty| self.layout_of(ty)).collect::<Result<Vec<_>, _>>()?,
                    &ReprOptions::default(),
                    StructKind::AlwaysSized)?
            }

            ty::Tuple(tys) => {
                let kind = if tys.len() == 0 {
                    StructKind::AlwaysSized
                } else {
                    StructKind::MaybeUnsized
                };

                univariant(&tys.iter().map(|ty| self.layout_of(ty)).collect::<Result<Vec<_>, _>>()?,
                    &ReprOptions::default(), kind)?
            }
            // SIMD vector types.
            ty::Adt(def, ..) if def.repr.simd() => {
                let element = self.layout_of(ty.simd_type(tcx))?;
                let count = ty.simd_size(tcx) as u64;
                assert!(count > 0);
                let scalar = match element.abi {
                    Abi::Scalar(ref scalar) => scalar.clone(),
                    _ => {
                        tcx.sess.fatal(&format!("monomorphising SIMD type `{}` with \
                                                 a non-machine element type `{}`",
                                                ty, element.ty));
                    }
                };
                let size = element.size.checked_mul(count, dl)
                    .ok_or(LayoutError::SizeOverflow(ty))?;
                let align = dl.vector_align(size);
                let size = size.abi_align(align);
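                // Worked example (hedged, typical x86_64 data layout): a
                // 4 x f32 vector is 4 * 4 = 16 bytes, `vector_align(16)` is
                // 16, and rounding 16 up to that alignment leaves the size
                // unchanged at 16 bytes.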
                tcx.intern_layout(LayoutDetails {
                    variants: Variants::Single { index: VariantIdx::new(0) },
                    fields: FieldPlacement::Array {
                        stride: element.size,
                        count
                    },
                    abi: Abi::Vector {
                        element: scalar,
                        count
                    },
                    size,
                    align,
                })
            }
            ty::Adt(def, substs) => {
                // Cache the field layouts.
                let variants = def.variants.iter().map(|v| {
                    v.fields.iter().map(|field| {
                        self.layout_of(field.ty(tcx, substs))
                    }).collect::<Result<Vec<_>, _>>()
                }).collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
                if def.is_union() {
                    let packed = def.repr.packed();
                    if packed && def.repr.align > 0 {
                        bug!("Union cannot be packed and aligned");
                    }

                    let pack = {
                        let pack = def.repr.pack as u64;
                        Align::from_bytes(pack, pack).unwrap()
                    };

                    let mut align = if packed {
                        dl.i8_align
                    } else {
                        dl.aggregate_align
                    };

                    if def.repr.align > 0 {
                        let repr_align = def.repr.align as u64;
                        align = align.max(
                            Align::from_bytes(repr_align, repr_align).unwrap());
                    }
                    let mut size = Size::ZERO;
                    let index = VariantIdx::new(0);
                    for field in &variants[index] {
                        assert!(!field.is_unsized());

                        if packed {
                            let field_pack = field.align.min(pack);
                            align = align.max(field_pack);
                        } else {
                            align = align.max(field.align);
                        }
                        size = cmp::max(size, field.size);
                    }

                    return Ok(tcx.intern_layout(LayoutDetails {
                        variants: Variants::Single { index },
                        fields: FieldPlacement::Union(variants[index].len()),
                        abi: Abi::Aggregate { sized: true },
                        align,
                        size: size.abi_align(align)
                    }));
                }
                // A variant is absent if it's uninhabited and only has ZST fields.
                // Present uninhabited variants only require space for their fields,
                // but *not* an encoding of the discriminant (e.g. a tag value).
                // See issue #49298 for more details on the need to leave space
                // for non-ZST uninhabited data (mostly partial initialization).
                let absent = |fields: &[TyLayout<'_>]| {
                    let uninhabited = fields.iter().any(|f| f.abi.is_uninhabited());
                    let is_zst = fields.iter().all(|f| f.is_zst());
                    uninhabited && is_zst
                };
                let (present_first, present_second) = {
                    let mut present_variants = variants.iter_enumerated().filter_map(|(i, v)| {
                        if absent(v) {
                            None
                        } else {
                            Some(i)
                        }
                    });
                    (present_variants.next(), present_variants.next())
                };
                if present_first.is_none() {
                    // Uninhabited because it has no variants, or only absent ones.
                    return tcx.layout_raw(param_env.and(tcx.types.never));
                }
                let is_struct = !def.is_enum() ||
                                // Only one variant is present.
                                (present_second.is_none() &&
                                 // Representation optimizations are allowed.
                                 !def.repr.inhibit_enum_layout_opt());

                // Struct, or univariant enum equivalent to a struct.
                // (Typechecking will reject discriminant-sizing attrs.)
                if is_struct {
                    let v = present_first.unwrap();
                    let kind = if def.is_enum() || variants[v].len() == 0 {
                        StructKind::AlwaysSized
                    } else {
                        let param_env = tcx.param_env(def.did);
                        let last_field = def.variants[v].fields.last().unwrap();
                        let always_sized = tcx.type_of(last_field.did)
                            .is_sized(tcx.at(DUMMY_SP), param_env);
                        if !always_sized { StructKind::MaybeUnsized }
                        else { StructKind::AlwaysSized }
                    };
                    let mut st = univariant_uninterned(&variants[v], &def.repr, kind)?;
                    st.variants = Variants::Single { index: v };
                    let (start, end) = self.tcx.layout_scalar_valid_range(def.did);
                    match st.abi {
                        Abi::Scalar(ref mut scalar) |
                        Abi::ScalarPair(ref mut scalar, _) => {
                            // the asserts ensure that we are not using the
                            // `#[rustc_layout_scalar_valid_range(n)]`
                            // attribute to widen the range of anything as that would probably
                            // result in UB somewhere
                            if let Bound::Included(start) = start {
                                assert!(*scalar.valid_range.start() <= start);
                                scalar.valid_range = start..=*scalar.valid_range.end();
                            }
                            if let Bound::Included(end) = end {
                                assert!(*scalar.valid_range.end() >= end);
                                scalar.valid_range = *scalar.valid_range.start()..=end;
                            }
                        }
                        _ => assert!(
                            start == Bound::Unbounded && end == Bound::Unbounded,
                            "nonscalar layout for layout_scalar_valid_range type {:?}: {:#?}",
                            def,
                            st,
                        ),
                    }
                    return Ok(tcx.intern_layout(st));
                }
                // The current code for niche-filling relies on variant indices
                // instead of actual discriminants, so dataful enums with
                // explicit discriminants (RFC #2363) would misbehave.
                let no_explicit_discriminants = def.variants.iter_enumerated()
                    .all(|(i, v)| v.discr == ty::VariantDiscr::Relative(i.as_u32()));
                // Niche-filling enum optimization.
                if !def.repr.inhibit_enum_layout_opt() && no_explicit_discriminants {
                    let mut dataful_variant = None;
                    let mut niche_variants = VariantIdx::MAX..=VariantIdx::new(0);

                    // Find one non-ZST variant.
                    'variants: for (v, fields) in variants.iter_enumerated() {
                        if absent(fields) {
                            continue 'variants;
                        }
                        for f in fields {
                            if !f.is_zst() {
                                if dataful_variant.is_none() {
                                    dataful_variant = Some(v);
                                    continue 'variants;
                                } else {
                                    dataful_variant = None;
                                    break 'variants;
                                }
                            }
                        }
                        niche_variants = *niche_variants.start().min(&v)..=v;
                    }

                    if niche_variants.start() > niche_variants.end() {
                        dataful_variant = None;
                    }
                    if let Some(i) = dataful_variant {
                        let count = (
                            niche_variants.end().as_u32() - niche_variants.start().as_u32() + 1
                        ) as u128;
                        for (field_index, &field) in variants[i].iter().enumerate() {
                            let niche = match self.find_niche(field)? {
                                Some(niche) => niche,
                                None => continue,
                            };
                            let (niche_start, niche_scalar) = match niche.reserve(self, count) {
                                Some(pair) => pair,
                                None => continue,
                            };
                            let mut align = dl.aggregate_align;
                            let st = variants.iter_enumerated().map(|(j, v)| {
                                let mut st = univariant_uninterned(v,
                                    &def.repr, StructKind::AlwaysSized)?;
                                st.variants = Variants::Single { index: j };

                                align = align.max(st.align);

                                Ok(st)
                            }).collect::<Result<IndexVec<VariantIdx, _>, _>>()?;

                            let offset = st[i].fields.offset(field_index) + niche.offset;
                            let size = st[i].size;
                            let mut abi = match st[i].abi {
                                Abi::Scalar(_) => Abi::Scalar(niche_scalar.clone()),
                                Abi::ScalarPair(ref first, ref second) => {
                                    // We need to use scalar_unit to reset the
                                    // valid range to the maximal one for that
                                    // primitive, because only the niche is
                                    // guaranteed to be initialised, not the
                                    // other primitive.
                                    if offset.bytes() == 0 {
                                        Abi::ScalarPair(
                                            niche_scalar.clone(),
                                            scalar_unit(second.value),
                                        )
                                    } else {
                                        Abi::ScalarPair(
                                            scalar_unit(first.value),
                                            niche_scalar.clone(),
                                        )
                                    }
                                }
                                _ => Abi::Aggregate { sized: true },
                            };

                            if st.iter().all(|v| v.abi.is_uninhabited()) {
                                abi = Abi::Uninhabited;
                            }
                            return Ok(tcx.intern_layout(LayoutDetails {
                                variants: Variants::NicheFilling {
                                    dataful_variant: i,
                                    niche_variants,
                                    niche: niche_scalar,
                                    niche_start,
                                    variants: st,
                                },
                                fields: FieldPlacement::Arbitrary {
                                    offsets: vec![offset],
                                    memory_index: vec![0]
                                },
                                abi,
                                size,
                                align,
                            }));
                        }
                    }
                }
                let (mut min, mut max) = (i128::max_value(), i128::min_value());
                let discr_type = def.repr.discr_type();
                let bits = Integer::from_attr(self, discr_type).size().bits();
                for (i, discr) in def.discriminants(tcx) {
                    if variants[i].iter().any(|f| f.abi.is_uninhabited()) {
                        continue;
                    }
                    let mut x = discr.val as i128;
                    if discr_type.is_signed() {
                        // sign extend the raw representation to be an i128
                        x = (x << (128 - bits)) >> (128 - bits);
                    }
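                    // Worked example: for an i8 discriminant (bits == 8), the
                    // raw value 0xFF becomes 0xFF << 120 >> 120 == -1 as an i128.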
                    if x < min { min = x; }
                    if x > max { max = x; }
                }
                // We might have no inhabited variants, so pretend there's at least one.
                if (min, max) == (i128::max_value(), i128::min_value()) {
                    min = 0;
                    max = 0;
                }

                assert!(min <= max, "discriminant range is {}...{}", min, max);
                let (min_ity, signed) = Integer::repr_discr(tcx, ty, &def.repr, min, max);

                let mut align = dl.aggregate_align;
                let mut size = Size::ZERO;

                // We're interested in the smallest alignment, so start large.
                let mut start_align = Align::from_bytes(256, 256).unwrap();
                assert_eq!(Integer::for_abi_align(dl, start_align), None);

                // repr(C) on an enum tells us to make a (tag, union) layout,
                // so we need to grow the prefix alignment to be at least
                // the alignment of the union. (This value is used both for
                // determining the alignment of the overall enum, and for
                // determining the alignment of the payload after the tag.)
                let mut prefix_align = min_ity.align(dl);
                if def.repr.c() {
                    for fields in &variants {
                        for field in fields {
                            prefix_align = prefix_align.max(field.align);
                        }
                    }
                }
                // Create the set of structs that represent each variant.
                let mut layout_variants = variants.iter_enumerated().map(|(i, field_layouts)| {
                    let mut st = univariant_uninterned(&field_layouts,
                        &def.repr, StructKind::Prefixed(min_ity.size(), prefix_align))?;
                    st.variants = Variants::Single { index: i };
                    // Find the first field we can't move later
                    // to make room for a larger discriminant.
                    for field in st.fields.index_by_increasing_offset().map(|j| field_layouts[j]) {
                        if !field.is_zst() || field.align.abi() != 1 {
                            start_align = start_align.min(field.align);
                            break;
                        }
                    }
                    size = cmp::max(size, st.size);
                    align = align.max(st.align);
                    Ok(st)
                }).collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
                // Align the maximum variant size to the largest alignment.
                size = size.abi_align(align);

                if size.bytes() >= dl.obj_size_bound() {
                    return Err(LayoutError::SizeOverflow(ty));
                }

                let typeck_ity = Integer::from_attr(dl, def.repr.discr_type());
                if typeck_ity < min_ity {
                    // It is a bug if Layout decided on a greater discriminant size than typeck for
                    // some reason at this point (based on the values the discriminant can take on).
                    // Mostly because this discriminant will be loaded, and then stored into a
                    // variable of the type calculated by typeck. Consider such a case (a bug):
                    // typeck decided on a byte-sized discriminant, but layout thinks we need
                    // 16 bits to store all discriminant values. Then, in codegen, in order to
                    // store this 16-bit discriminant into an 8-bit sized temporary, some of the
                    // space necessary to represent it would have to be discarded (or layout is
                    // wrong in thinking it needs 16 bits).
                    bug!("layout decided on a larger discriminant type ({:?}) than typeck ({:?})",
                         min_ity, typeck_ity);
                    // However, it is fine to make discr type however large (as an optimisation)
                    // after this point – we’ll just truncate the value we load in codegen.
                }
                // Check to see if we should use a different type for the
                // discriminant. We can safely use a type with the same size
                // as the alignment of the first field of each variant.
                // We increase the size of the discriminant to avoid LLVM copying
                // padding when it doesn't need to. This normally causes unaligned
                // load/stores and excessive memcpy/memset operations. By using a
                // bigger integer size, LLVM can be sure about its contents and
                // won't be so conservative.

                // Use the initial field alignment
                let mut ity = if def.repr.c() || def.repr.int.is_some() {
                    min_ity
                } else {
                    Integer::for_abi_align(dl, start_align).unwrap_or(min_ity)
                };

                // If the alignment is not larger than the chosen discriminant size,
                // don't use the alignment as the final size.
                if ity <= min_ity {
                    ity = min_ity;
                } else {
                    // Patch up the variants' first few fields.
                    let old_ity_size = min_ity.size();
                    let new_ity_size = ity.size();
                    for variant in &mut layout_variants {
                        match variant.fields {
                            FieldPlacement::Arbitrary { ref mut offsets, .. } => {
                                for i in offsets {
                                    if *i <= old_ity_size {
                                        assert_eq!(*i, old_ity_size);
                                        *i = new_ity_size;
                                    }
                                }
                                // We might be making the struct larger.
                                if variant.size <= old_ity_size {
                                    variant.size = new_ity_size;
                                }
                            }
                            _ => bug!()
                        }
                    }
                }
                let tag_mask = !0u128 >> (128 - ity.size().bits());
                let tag = Scalar {
                    value: Int(ity, signed),
                    valid_range: (min as u128 & tag_mask)..=(max as u128 & tag_mask),
                };
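                // Worked example (hedged): for ity == I8 and a discriminant
                // range of -1..=1, tag_mask is 0xff, so valid_range becomes
                // the wrapping range 0xff..=1 (i.e. only 2..=0xfe is known
                // to be invalid).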
                let mut abi = Abi::Aggregate { sized: true };
                if tag.value.size(dl) == size {
                    abi = Abi::Scalar(tag.clone());
                } else {
                    // Try to use a ScalarPair for all tagged enums.
                    let mut common_prim = None;
                    for (field_layouts, layout_variant) in variants.iter().zip(&layout_variants) {
                        let offsets = match layout_variant.fields {
                            FieldPlacement::Arbitrary { ref offsets, .. } => offsets,
                            _ => bug!(),
                        };
                        let mut fields = field_layouts
                            .iter()
                            .zip(offsets)
                            .filter(|p| !p.0.is_zst());
                        let (field, offset) = match (fields.next(), fields.next()) {
                            (None, None) => continue,
                            (Some(pair), None) => pair,
                            _ => {
                                common_prim = None;
                                break;
                            }
                        };
                        let prim = match field.details.abi {
                            Abi::Scalar(ref scalar) => scalar.value,
                            _ => {
                                common_prim = None;
                                break;
                            }
                        };
                        if let Some(pair) = common_prim {
                            // This is pretty conservative. We could go fancier
                            // by conflating things like i32 and u32, or even
                            // realising that (u8, u8) could just cohabit with
                            // u16 or even u32.
                            if pair != (prim, offset) {
                                common_prim = None;
                                break;
                            }
                        } else {
                            common_prim = Some((prim, offset));
                        }
                    }
                    if let Some((prim, offset)) = common_prim {
                        let pair = scalar_pair(tag.clone(), scalar_unit(prim));
                        let pair_offsets = match pair.fields {
                            FieldPlacement::Arbitrary {
                                ref offsets,
                                ref memory_index
                            } => {
                                assert_eq!(memory_index, &[0, 1]);
                                offsets
                            }
                            _ => bug!()
                        };
                        if pair_offsets[0] == Size::ZERO &&
                           pair_offsets[1] == *offset &&
                           align == pair.align &&
                           size == pair.size {
                            // We can use `ScalarPair` only when it matches our
                            // already computed layout (including `#[repr(C)]`).
                            abi = pair.abi;
                        }
                    }
                }
                if layout_variants.iter().all(|v| v.abi.is_uninhabited()) {
                    abi = Abi::Uninhabited;
                }

                tcx.intern_layout(LayoutDetails {
                    variants: Variants::Tagged {
                        tag,
                        variants: layout_variants,
                    },
                    fields: FieldPlacement::Arbitrary {
                        offsets: vec![Size::ZERO],
                        memory_index: vec![0]
                    },
                    abi,
                    align,
                    size
                })
            }
            // Types with no meaningful known layout.
            ty::Projection(_) | ty::Opaque(..) => {
                let normalized = tcx.normalize_erasing_regions(param_env, ty);
                if ty == normalized {
                    return Err(LayoutError::Unknown(ty));
                }
                tcx.layout_raw(param_env.and(normalized))?
            }

            ty::Bound(..) |
            ty::UnnormalizedProjection(..) |
            ty::GeneratorWitness(..) |
            ty::Infer(_) => {
                bug!("LayoutDetails::compute: unexpected type `{}`", ty)
            }

            ty::Param(_) | ty::Error => {
                return Err(LayoutError::Unknown(ty));
            }
        })
    }
    /// This is invoked by the `layout_raw` query to record the final
    /// layout of each type.
    #[inline]
    fn record_layout_for_printing(&self, layout: TyLayout<'tcx>) {
        // If we are running with `-Zprint-type-sizes`, record layouts for
        // dumping later. Ignore layouts that are done with non-empty
        // environments or non-monomorphic layouts, as the user only wants
        // to see the stuff resulting from the final codegen session.
        if
            !self.tcx.sess.opts.debugging_opts.print_type_sizes ||
            layout.ty.has_param_types() ||
            layout.ty.has_self_ty() ||
            !self.param_env.caller_bounds.is_empty()
        {
            return;
        }

        self.record_layout_for_printing_outlined(layout)
    }
    fn record_layout_for_printing_outlined(&self, layout: TyLayout<'tcx>) {
        // (delay format until we actually need it)
        let record = |kind, packed, opt_discr_size, variants| {
            let type_desc = format!("{:?}", layout.ty);
            self.tcx.sess.code_stats.borrow_mut().record_type_size(kind,
                                                                   type_desc,
                                                                   layout.align,
                                                                   layout.size,
                                                                   packed,
                                                                   opt_discr_size,
                                                                   variants);
        };

        let adt_def = match layout.ty.sty {
            ty::Adt(ref adt_def, _) => {
                debug!("print-type-size t: `{:?}` process adt", layout.ty);
                adt_def
            }

            ty::Closure(..) => {
                debug!("print-type-size t: `{:?}` record closure", layout.ty);
                record(DataTypeKind::Closure, false, None, vec![]);
                return;
            }

            _ => {
                debug!("print-type-size t: `{:?}` skip non-nominal", layout.ty);
                return;
            }
        };
        let adt_kind = adt_def.adt_kind();
        let adt_packed = adt_def.repr.packed();

        let build_variant_info = |n: Option<ast::Name>,
                                  flds: &[ast::Name],
                                  layout: TyLayout<'tcx>| {
            let mut min_size = Size::ZERO;
            let field_info: Vec<_> = flds.iter().enumerate().map(|(i, &name)| {
                match layout.field(self, i) {
                    Err(err) => {
                        bug!("no layout found for field {}: `{:?}`", name, err);
                    }
                    Ok(field_layout) => {
                        let offset = layout.fields.offset(i);
                        let field_end = offset + field_layout.size;
                        if min_size < field_end {
                            min_size = field_end;
                        }
                        session::FieldInfo {
                            name: name.to_string(),
                            offset: offset.bytes(),
                            size: field_layout.size.bytes(),
                            align: field_layout.align.abi(),
                        }
                    }
                }
            }).collect();

            session::VariantInfo {
                name: n.map(|n| n.to_string()),
                kind: if layout.is_unsized() {
                    session::SizeKind::Min
                } else {
                    session::SizeKind::Exact
                },
                align: layout.align.abi(),
                size: if min_size.bytes() == 0 {
                    layout.size.bytes()
                } else {
                    min_size.bytes()
                },
                fields: field_info,
            }
        };
        match layout.variants {
            Variants::Single { index } => {
                debug!("print-type-size `{:#?}` variant {}",
                       layout, adt_def.variants[index].name);
                if !adt_def.variants.is_empty() {
                    let variant_def = &adt_def.variants[index];
                    let fields: Vec<_> =
                        variant_def.fields.iter().map(|f| f.ident.name).collect();
                    record(adt_kind.into(),
                           adt_packed,
                           None,
                           vec![build_variant_info(Some(variant_def.name),
                                                   &fields,
                                                   layout)]);
                } else {
                    // (This case arises for *empty* enums; so give it
                    // zero variants.)
                    record(adt_kind.into(), adt_packed, None, vec![]);
                }
            }
            Variants::NicheFilling { .. } |
            Variants::Tagged { .. } => {
                debug!("print-type-size `{:#?}` adt general variants def {}",
                       layout.ty, adt_def.variants.len());
                let variant_infos: Vec<_> =
                    adt_def.variants.iter_enumerated().map(|(i, variant_def)| {
                        let fields: Vec<_> =
                            variant_def.fields.iter().map(|f| f.ident.name).collect();
                        build_variant_info(Some(variant_def.name),
                                           &fields,
                                           layout.for_variant(self, i))
                    }).collect();
                record(adt_kind.into(), adt_packed, match layout.variants {
                    Variants::Tagged { ref tag, .. } => Some(tag.value.size(self)),
                    _ => None
                }, variant_infos);
            }
        }
    }
}
/// Type size "skeleton", i.e. the only information determining a type's size.
/// While this is conservative (aside from constant sizes, only pointers,
/// newtypes thereof, and null-pointer-optimized enums are allowed), it is
/// enough to statically check common use cases of transmute.
#[derive(Copy, Clone, Debug)]
pub enum SizeSkeleton<'tcx> {
    /// Any statically computable Layout.
    Known(Size),

    /// A potentially-fat pointer.
    Pointer {
        /// If true, this pointer is never null.
        non_zero: bool,
        /// The type which determines the unsized metadata, if any,
        /// of this pointer. Either a type parameter or a projection
        /// depending on one, with regions erased.
        tail: Ty<'tcx>
    }
}
impl<'a, 'tcx> SizeSkeleton<'tcx> {
    pub fn compute(ty: Ty<'tcx>,
                   tcx: TyCtxt<'a, 'tcx, 'tcx>,
                   param_env: ty::ParamEnv<'tcx>)
                   -> Result<SizeSkeleton<'tcx>, LayoutError<'tcx>> {
        debug_assert!(!ty.has_infer_types());

        // First try computing a static layout.
        let err = match tcx.layout_of(param_env.and(ty)) {
            Ok(layout) => {
                return Ok(SizeSkeleton::Known(layout.size));
            }
            Err(err) => err
        };

        match ty.sty {
            ty::Ref(_, pointee, _) |
            ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
                let non_zero = !ty.is_unsafe_ptr();
                let tail = tcx.struct_tail(pointee);
                match tail.sty {
                    ty::Param(_) | ty::Projection(_) => {
                        debug_assert!(tail.has_param_types() || tail.has_self_ty());
                        Ok(SizeSkeleton::Pointer {
                            non_zero,
                            tail: tcx.erase_regions(&tail)
                        })
                    }
                    _ => {
                        bug!("SizeSkeleton::compute({}): layout errored ({}), yet \
                              tail `{}` is not a type parameter or a projection",
                             ty, err, tail)
                    }
                }
            }
            ty::Adt(def, substs) => {
                // Only newtypes and enums w/ nullable pointer optimization.
                if def.is_union() || def.variants.is_empty() || def.variants.len() > 2 {
                    return Err(err);
                }

                // Get a zero-sized variant or a pointer newtype.
                let zero_or_ptr_variant = |i| {
                    let i = VariantIdx::new(i);
                    let fields = def.variants[i].fields.iter().map(|field| {
                        SizeSkeleton::compute(field.ty(tcx, substs), tcx, param_env)
                    });
                    let mut ptr = None;
                    for field in fields {
                        let field = field?;
                        match field {
                            SizeSkeleton::Known(size) => {
                                if size.bytes() > 0 {
                                    return Err(err);
                                }
                            }
                            SizeSkeleton::Pointer {..} => {
                                if ptr.is_some() {
                                    return Err(err);
                                }
                                ptr = Some(field);
                            }
                        }
                    }
                    Ok(ptr)
                };

                let v0 = zero_or_ptr_variant(0)?;
                // Newtype.
                if def.variants.len() == 1 {
                    if let Some(SizeSkeleton::Pointer { non_zero, tail }) = v0 {
                        return Ok(SizeSkeleton::Pointer {
                            non_zero: non_zero || match tcx.layout_scalar_valid_range(def.did) {
                                (Bound::Included(start), Bound::Unbounded) => start > 0,
                                (Bound::Included(start), Bound::Included(end)) =>
                                    0 < start && start < end,
                                _ => false,
                            },
                            tail,
                        });
                    } else {
                        return Err(err);
                    }
                }
                let v1 = zero_or_ptr_variant(1)?;
                // Nullable pointer enum optimization.
                match (v0, v1) {
                    (Some(SizeSkeleton::Pointer { non_zero: true, tail }), None) |
                    (None, Some(SizeSkeleton::Pointer { non_zero: true, tail })) => {
                        Ok(SizeSkeleton::Pointer {
                            non_zero: false,
                            tail,
                        })
                    }
                    _ => Err(err)
                }
            }

            ty::Projection(_) | ty::Opaque(..) => {
                let normalized = tcx.normalize_erasing_regions(param_env, ty);
                if ty == normalized {
                    Err(err)
                } else {
                    SizeSkeleton::compute(normalized, tcx, param_env)
                }
            }

            _ => Err(err)
        }
    }
    pub fn same_size(self, other: SizeSkeleton<'_>) -> bool {
        match (self, other) {
            (SizeSkeleton::Known(a), SizeSkeleton::Known(b)) => a == b,
            (SizeSkeleton::Pointer { tail: a, .. },
             SizeSkeleton::Pointer { tail: b, .. }) => a == b,
            _ => false
        }
    }
}
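// A hedged, standalone illustration (not rustc internals) of the nullable
// pointer case that SizeSkeleton reasons about: an enum wrapping a never-null
// pointer can reuse the null value for its other variant, so the enum is
// pointer-sized.
#[cfg(test)]
mod size_skeleton_examples {
    use std::mem::size_of;

    #[test]
    fn nullable_pointer_opt() {
        assert_eq!(size_of::<Option<&u8>>(), size_of::<&u8>());
        assert_eq!(size_of::<Option<Box<u8>>>(), size_of::<Box<u8>>());
    }
}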
pub trait HasTyCtxt<'tcx>: HasDataLayout {
    fn tcx<'a>(&'a self) -> TyCtxt<'a, 'tcx, 'tcx>;
}

impl<'a, 'gcx, 'tcx> HasDataLayout for TyCtxt<'a, 'gcx, 'tcx> {
    fn data_layout(&self) -> &TargetDataLayout {
        &self.data_layout
    }
}

impl<'a, 'gcx, 'tcx> HasTyCtxt<'gcx> for TyCtxt<'a, 'gcx, 'tcx> {
    fn tcx<'b>(&'b self) -> TyCtxt<'b, 'gcx, 'gcx> {
        self.global_tcx()
    }
}

impl<'tcx, T: HasDataLayout> HasDataLayout for LayoutCx<'tcx, T> {
    fn data_layout(&self) -> &TargetDataLayout {
        self.tcx.data_layout()
    }
}

impl<'gcx, 'tcx, T: HasTyCtxt<'gcx>> HasTyCtxt<'gcx> for LayoutCx<'tcx, T> {
    fn tcx<'b>(&'b self) -> TyCtxt<'b, 'gcx, 'gcx> {
        self.tcx.tcx()
    }
}
pub trait MaybeResult<T> {
    fn from_ok(x: T) -> Self;
    fn map_same<F: FnOnce(T) -> T>(self, f: F) -> Self;
}

impl<T> MaybeResult<T> for T {
    fn from_ok(x: T) -> Self {
        x
    }
    fn map_same<F: FnOnce(T) -> T>(self, f: F) -> Self {
        f(self)
    }
}

impl<T, E> MaybeResult<T> for Result<T, E> {
    fn from_ok(x: T) -> Self {
        Ok(x)
    }
    fn map_same<F: FnOnce(T) -> T>(self, f: F) -> Self {
        self.map(f)
    }
}

pub type TyLayout<'tcx> = ::rustc_target::abi::TyLayout<'tcx, Ty<'tcx>>;
impl<'a, 'tcx> LayoutOf for LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> {
    type Ty = Ty<'tcx>;
    type TyLayout = Result<TyLayout<'tcx>, LayoutError<'tcx>>;

    /// Computes the layout of a type. Note that this implicitly
    /// executes in "reveal all" mode.
    fn layout_of(&self, ty: Ty<'tcx>) -> Self::TyLayout {
        let param_env = self.param_env.with_reveal_all();
        let ty = self.tcx.normalize_erasing_regions(param_env, ty);
        let details = self.tcx.layout_raw(param_env.and(ty))?;
        let layout = TyLayout {
            ty,
            details
        };

        // NB: This recording is normally disabled; when enabled, it
        // can however trigger recursive invocations of `layout_of`.
        // Therefore, we execute it *after* the main query has
        // completed, to avoid problems around recursive structures
        // and the like. (Admittedly, I wasn't able to reproduce a problem
        // here, but it seems like the right thing to do. -nmatsakis)
        self.record_layout_for_printing(layout);

        Ok(layout)
    }
}
impl<'a, 'tcx> LayoutOf for LayoutCx<'tcx, ty::query::TyCtxtAt<'a, 'tcx, 'tcx>> {
    type Ty = Ty<'tcx>;
    type TyLayout = Result<TyLayout<'tcx>, LayoutError<'tcx>>;

    /// Computes the layout of a type. Note that this implicitly
    /// executes in "reveal all" mode.
    fn layout_of(&self, ty: Ty<'tcx>) -> Self::TyLayout {
        let param_env = self.param_env.with_reveal_all();
        let ty = self.tcx.normalize_erasing_regions(param_env, ty);
        let details = self.tcx.layout_raw(param_env.and(ty))?;
        let layout = TyLayout {
            ty,
            details
        };

        // NB: This recording is normally disabled; when enabled, it
        // can however trigger recursive invocations of `layout_of`.
        // Therefore, we execute it *after* the main query has
        // completed, to avoid problems around recursive structures
        // and the like. (Admittedly, I wasn't able to reproduce a problem
        // here, but it seems like the right thing to do. -nmatsakis)
        let cx = LayoutCx {
            tcx: *self.tcx,
            param_env: self.param_env
        };
        cx.record_layout_for_printing(layout);

        Ok(layout)
    }
}
// Helper (inherent) `layout_of` methods to avoid pushing `LayoutCx` to users.
impl TyCtxt<'a, 'tcx, '_> {
    /// Computes the layout of a type. Note that this implicitly
    /// executes in "reveal all" mode.
    #[inline]
    pub fn layout_of(self, param_env_and_ty: ty::ParamEnvAnd<'tcx, Ty<'tcx>>)
                     -> Result<TyLayout<'tcx>, LayoutError<'tcx>> {
        let cx = LayoutCx {
            tcx: self.global_tcx(),
            param_env: param_env_and_ty.param_env
        };
        cx.layout_of(param_env_and_ty.value)
    }
}

impl ty::query::TyCtxtAt<'a, 'tcx, '_> {
    /// Computes the layout of a type. Note that this implicitly
    /// executes in "reveal all" mode.
    #[inline]
    pub fn layout_of(self, param_env_and_ty: ty::ParamEnvAnd<'tcx, Ty<'tcx>>)
                     -> Result<TyLayout<'tcx>, LayoutError<'tcx>> {
        let cx = LayoutCx {
            tcx: self.global_tcx().at(self.span),
            param_env: param_env_and_ty.param_env
        };
        cx.layout_of(param_env_and_ty.value)
    }
}
impl<'a, 'tcx, C> TyLayoutMethods<'tcx, C> for Ty<'tcx>
    where C: LayoutOf<Ty = Ty<'tcx>> + HasTyCtxt<'tcx>,
          C::TyLayout: MaybeResult<TyLayout<'tcx>>
{
    fn for_variant(this: TyLayout<'tcx>, cx: &C, variant_index: VariantIdx) -> TyLayout<'tcx> {
        let details = match this.variants {
            Variants::Single { index } if index == variant_index => this.details,

            Variants::Single { index } => {
                // Deny calling for_variant more than once for non-Single enums.
                cx.layout_of(this.ty).map_same(|layout| {
                    assert_eq!(layout.variants, Variants::Single { index });
                    layout
                });

                let fields = match this.ty.sty {
                    ty::Adt(def, _) => def.variants[variant_index].fields.len(),
                    _ => bug!()
                };
                let tcx = cx.tcx();
                tcx.intern_layout(LayoutDetails {
                    variants: Variants::Single { index: variant_index },
                    fields: FieldPlacement::Union(fields),
                    abi: Abi::Uninhabited,
                    align: tcx.data_layout.i8_align,
                    size: Size::ZERO
                })
            }

            Variants::NicheFilling { ref variants, .. } |
            Variants::Tagged { ref variants, .. } => {
                &variants[variant_index]
            }
        };

        assert_eq!(details.variants, Variants::Single { index: variant_index });

        TyLayout {
            ty: this.ty,
            details
        }
    }
    fn field(this: TyLayout<'tcx>, cx: &C, i: usize) -> C::TyLayout {
        let tcx = cx.tcx();
        cx.layout_of(match this.ty.sty {
            ty::Bool |
            ty::Char |
            ty::Int(_) |
            ty::Uint(_) |
            ty::Float(_) |
            ty::FnPtr(_) |
            ty::Never |
            ty::FnDef(..) |
            ty::GeneratorWitness(..) |
            ty::Foreign(..) |
            ty::Dynamic(..) => {
                bug!("TyLayout::field_type({:?}): not applicable", this)
            }
            // Potentially-fat pointers.
            ty::Ref(_, pointee, _) |
            ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
                assert!(i < this.fields.count());

                // Reuse the fat *T type as its own thin pointer data field.
                // This provides information about e.g. DST struct pointees
                // (which may have no non-DST form), and will work as long
                // as the `Abi` or `FieldPlacement` is checked by users.
                if i == 0 {
                    let nil = tcx.mk_unit();
                    let ptr_ty = if this.ty.is_unsafe_ptr() {
                        tcx.mk_mut_ptr(nil)
                    } else {
                        tcx.mk_mut_ref(tcx.types.re_static, nil)
                    };
                    return cx.layout_of(ptr_ty).map_same(|mut ptr_layout| {
                        ptr_layout.ty = this.ty;
                        ptr_layout
                    });
                }

                match tcx.struct_tail(pointee).sty {
                    ty::Slice(element) => element,
                    ty::Str => tcx.types.usize,
                    ty::Dynamic(_, _) => {
                        tcx.mk_imm_ref(
                            tcx.types.re_static,
                            tcx.mk_array(tcx.types.usize, 3),
                        )
                        /* FIXME use actual fn pointers
                        Warning: naively computing the number of entries in the
                        vtable by counting the methods on the trait + methods on
                        all parent traits does not work, because some methods can
                        be not object safe and thus excluded from the vtable.
                        Increase this counter if you tried to implement this but
                        failed to do it without duplicating a lot of code from
                        other places in the compiler: 2
                        tcx.mk_tup(&[
                            tcx.mk_array(tcx.types.usize, 3),
                            tcx.mk_array(Option<fn()>),
                        ])
                        */
                    }
                    _ => bug!("TyLayout::field_type({:?}): not applicable", this)
                }
            }
            // Arrays and slices.
            ty::Array(element, _) |
            ty::Slice(element) => element,
            ty::Str => tcx.types.u8,

            // Tuples, generators and closures.
            ty::Closure(def_id, ref substs) => {
                substs.upvar_tys(def_id, tcx).nth(i).unwrap()
            }

            ty::Generator(def_id, ref substs, _) => {
                substs.field_tys(def_id, tcx).nth(i).unwrap()
            }

            ty::Tuple(tys) => tys[i],

            // SIMD vector types.
            ty::Adt(def, ..) if def.repr.simd() => {
                this.ty.simd_type(tcx)
            }
            ty::Adt(def, substs) => {
                match this.variants {
                    Variants::Single { index } => {
                        def.variants[index].fields[i].ty(tcx, substs)
                    }

                    // Discriminant field for enums (where applicable).
                    Variants::Tagged { tag: ref discr, .. } |
                    Variants::NicheFilling { niche: ref discr, .. } => {
                        assert_eq!(i, 0);
                        let layout = LayoutDetails::scalar(cx, discr.clone());
                        return MaybeResult::from_ok(TyLayout {
                            details: tcx.intern_layout(layout),
                            ty: discr.value.to_ty(tcx)
                        });
                    }
                }
            }

            ty::Projection(_) | ty::UnnormalizedProjection(..) | ty::Bound(..) |
            ty::Opaque(..) | ty::Param(_) | ty::Infer(_) | ty::Error => {
                bug!("TyLayout::field_type: unexpected type `{}`", this.ty)
            }
        })
    }
}
struct Niche {
    offset: Size,
    scalar: Scalar,
    available: u128,
}

impl Niche {
    fn reserve<'a, 'tcx>(
        &self,
        cx: &LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>>,
        count: u128,
    ) -> Option<(u128, Scalar)> {
        if count > self.available {
            return None;
        }
        let Scalar { value, valid_range: ref v } = self.scalar;
        let bits = value.size(cx).bits();
        assert!(bits <= 128);
        let max_value = !0u128 >> (128 - bits);
        let start = v.end().wrapping_add(1) & max_value;
        let end = v.end().wrapping_add(count) & max_value;
        Some((start, Scalar { value, valid_range: *v.start()..=end }))
    }
}
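// Worked example of `reserve` (hedged): for a bool scalar (I8 with
// valid_range 0..=1) and count == 2, max_value is 0xff, start is 2, and the
// widened valid_range is 0..=3, so the values 2 and 3 become the new niche
// tags.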
impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> {
    /// Find the offset of a niche leaf field, starting from
    /// the given type and recursing through aggregates.
    // FIXME(eddyb) traverse already optimized enums.
    fn find_niche(&self, layout: TyLayout<'tcx>) -> Result<Option<Niche>, LayoutError<'tcx>> {
        let scalar_niche = |scalar: &Scalar, offset| {
            let Scalar { value, valid_range: ref v } = *scalar;

            let bits = value.size(self).bits();
            assert!(bits <= 128);
            let max_value = !0u128 >> (128 - bits);

            // Find out how many values are outside the valid range.
            let available = if v.start() <= v.end() {
                v.start() + (max_value - v.end())
            } else {
                v.start() - v.end() - 1
            };
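            // Worked example (hedged): a bool scalar has v = 0..=1 in 8 bits,
            // so available = 0 + (255 - 1) = 254. A wrapping range like 1..=0
            // (a never-null pointer that already reserved 0) yields
            // available = 1 - 0 - 1 = 0: no further niche.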
            // Give up if there is no niche value available.
            if available == 0 {
                return None;
            }

            Some(Niche { offset, scalar: scalar.clone(), available })
        };
        // Local variables which live across yields are stored
        // in the generator type as fields. These may be uninitialized
        // so we don't look for niches there.
        if let ty::Generator(..) = layout.ty.sty {
            return Ok(None);
        }

        match layout.abi {
            Abi::Scalar(ref scalar) => {
                return Ok(scalar_niche(scalar, Size::ZERO));
            }
            Abi::ScalarPair(ref a, ref b) => {
                // HACK(nox): We iter on `b` and then `a` because `max_by_key`
                // returns the last maximum.
                let niche = iter::once((b, a.value.size(self).abi_align(b.value.align(self))))
                    .chain(iter::once((a, Size::ZERO)))
                    .filter_map(|(scalar, offset)| scalar_niche(scalar, offset))
                    .max_by_key(|niche| niche.available);
                return Ok(niche);
            }
            Abi::Vector { ref element, .. } => {
                return Ok(scalar_niche(element, Size::ZERO));
            }
            _ => {}
        }
        // Perhaps one of the fields is non-zero, let's recurse and find out.
        if let FieldPlacement::Union(_) = layout.fields {
            // Only Rust enums have safe-to-inspect fields
            // (a discriminant), other unions are unsafe.
            if let Variants::Single { .. } = layout.variants {
                return Ok(None);
            }
        }
        if let FieldPlacement::Array { .. } = layout.fields {
            if layout.fields.count() > 0 {
                return self.find_niche(layout.field(self, 0)?);
            } else {
                return Ok(None);
            }
        }
        let mut niche = None;
        let mut available = 0;
        for i in 0..layout.fields.count() {
            if let Some(mut c) = self.find_niche(layout.field(self, i)?)? {
                if c.available > available {
                    available = c.available;
                    c.offset += layout.fields.offset(i);
                    niche = Some(c);
                }
            }
        }
        Ok(niche)
    }
}
impl<'a> HashStable<StableHashingContext<'a>> for Variants {
    fn hash_stable<W: StableHasherResult>(&self,
                                          hcx: &mut StableHashingContext<'a>,
                                          hasher: &mut StableHasher<W>) {
        use ty::layout::Variants::*;
        mem::discriminant(self).hash_stable(hcx, hasher);

        match *self {
            Single { index } => {
                index.hash_stable(hcx, hasher);
            }
            Tagged {
                ref tag,
                ref variants,
            } => {
                tag.hash_stable(hcx, hasher);
                variants.hash_stable(hcx, hasher);
            }
            NicheFilling {
                dataful_variant,
                ref niche_variants,
                ref niche,
                niche_start,
                ref variants,
            } => {
                dataful_variant.hash_stable(hcx, hasher);
                niche_variants.start().hash_stable(hcx, hasher);
                niche_variants.end().hash_stable(hcx, hasher);
                niche.hash_stable(hcx, hasher);
                niche_start.hash_stable(hcx, hasher);
                variants.hash_stable(hcx, hasher);
            }
        }
    }
}
impl<'a> HashStable<StableHashingContext<'a>> for FieldPlacement {
    fn hash_stable<W: StableHasherResult>(&self,
                                          hcx: &mut StableHashingContext<'a>,
                                          hasher: &mut StableHasher<W>) {
        use ty::layout::FieldPlacement::*;
        mem::discriminant(self).hash_stable(hcx, hasher);

        match *self {
            Union(count) => {
                count.hash_stable(hcx, hasher);
            }
            Array { count, stride } => {
                count.hash_stable(hcx, hasher);
                stride.hash_stable(hcx, hasher);
            }
            Arbitrary { ref offsets, ref memory_index } => {
                offsets.hash_stable(hcx, hasher);
                memory_index.hash_stable(hcx, hasher);
            }
        }
    }
}
impl<'a> HashStable<StableHashingContext<'a>> for VariantIdx {
    fn hash_stable<W: StableHasherResult>(
        &self,
        hcx: &mut StableHashingContext<'a>,
        hasher: &mut StableHasher<W>,
    ) {
        self.as_u32().hash_stable(hcx, hasher)
    }
}
impl<'a> HashStable<StableHashingContext<'a>> for Abi {
    fn hash_stable<W: StableHasherResult>(&self,
                                          hcx: &mut StableHashingContext<'a>,
                                          hasher: &mut StableHasher<W>) {
        use ty::layout::Abi::*;
        mem::discriminant(self).hash_stable(hcx, hasher);

        match *self {
            Uninhabited => {}
            Scalar(ref value) => {
                value.hash_stable(hcx, hasher);
            }
            ScalarPair(ref a, ref b) => {
                a.hash_stable(hcx, hasher);
                b.hash_stable(hcx, hasher);
            }
            Vector { ref element, count } => {
                element.hash_stable(hcx, hasher);
                count.hash_stable(hcx, hasher);
            }
            Aggregate { sized } => {
                sized.hash_stable(hcx, hasher);
            }
        }
    }
}
impl<'a> HashStable<StableHashingContext<'a>> for Scalar {
    fn hash_stable<W: StableHasherResult>(&self,
                                          hcx: &mut StableHashingContext<'a>,
                                          hasher: &mut StableHasher<W>) {
        let Scalar { value, ref valid_range } = *self;
        value.hash_stable(hcx, hasher);
        valid_range.start().hash_stable(hcx, hasher);
        valid_range.end().hash_stable(hcx, hasher);
    }
}
impl_stable_hash_for!(struct ::ty::layout::LayoutDetails {
    variants,
    fields,
    abi,
    align,
    size
});

impl_stable_hash_for!(enum ::ty::layout::Integer {
    I8,
    I16,
    I32,
    I64,
    I128
});

impl_stable_hash_for!(enum ::ty::layout::Primitive {
    Int(integer, signed),
    Float(fty),
    Pointer
});
impl<'gcx> HashStable<StableHashingContext<'gcx>> for Align {
    fn hash_stable<W: StableHasherResult>(&self,
                                          hcx: &mut StableHashingContext<'gcx>,
                                          hasher: &mut StableHasher<W>) {
        self.abi().hash_stable(hcx, hasher);
        self.pref().hash_stable(hcx, hasher);
    }
}

impl<'gcx> HashStable<StableHashingContext<'gcx>> for Size {
    fn hash_stable<W: StableHasherResult>(&self,
                                          hcx: &mut StableHashingContext<'gcx>,
                                          hasher: &mut StableHasher<W>) {
        self.bytes().hash_stable(hcx, hasher);
    }
}
impl<'a, 'gcx> HashStable<StableHashingContext<'a>> for LayoutError<'gcx>
{
    fn hash_stable<W: StableHasherResult>(&self,
                                          hcx: &mut StableHashingContext<'a>,
                                          hasher: &mut StableHasher<W>) {
        use ty::layout::LayoutError::*;
        mem::discriminant(self).hash_stable(hcx, hasher);

        match *self {
            Unknown(t) |
            SizeOverflow(t) => t.hash_stable(hcx, hasher)
        }
    }
}