1 // Copyright 2016 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
11 use session::{self, DataTypeKind};
12 use ty::{self, Ty, TyCtxt, TypeFoldable, ReprOptions};
14 use syntax::ast::{self, IntTy, UintTy};
16 use syntax_pos::DUMMY_SP;
25 use ich::StableHashingContext;
26 use rustc_data_structures::stable_hasher::{HashStable, StableHasher,
29 pub use rustc_target::abi::*;
// Extension trait layering rustc-specific helpers onto the target-abi `Integer`
// type (re-exported above from `rustc_target::abi`): conversion to an interned
// `Ty`, construction from an `attr::IntType`, and discriminant representation
// selection for enums.
// NOTE(review): this listing elides lines; `repr_discr`'s parameter list and the
// trait's closing brace continue beyond what is visible here.
31 pub trait IntegerExt {
32 fn to_ty<'a, 'tcx>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, signed: bool) -> Ty<'tcx>;
33 fn from_attr<C: HasDataLayout>(cx: &C, ity: attr::IntType) -> Integer;
34 fn repr_discr<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
// NOTE(review): interior lines of this impl are elided (non-contiguous internal
// numbering); comments below describe only the visible code.
42 impl IntegerExt for Integer {
// Map an abstract `Integer` width plus signedness to the corresponding
// interned primitive type (u8..u128 / i8..i128).
43 fn to_ty<'a, 'tcx>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, signed: bool) -> Ty<'tcx> {
44 match (*self, signed) {
45 (I8, false) => tcx.types.u8,
46 (I16, false) => tcx.types.u16,
47 (I32, false) => tcx.types.u32,
48 (I64, false) => tcx.types.u64,
49 (I128, false) => tcx.types.u128,
50 (I8, true) => tcx.types.i8,
51 (I16, true) => tcx.types.i16,
52 (I32, true) => tcx.types.i32,
53 (I64, true) => tcx.types.i64,
54 (I128, true) => tcx.types.i128,
58 /// Get the Integer type from an attr::IntType.
59 fn from_attr<C: HasDataLayout>(cx: &C, ity: attr::IntType) -> Integer {
60 let dl = cx.data_layout();
// Fixed-width int types map directly to the same-width `Integer`;
// isize/usize take the target's pointer-sized integer from the data layout.
// (The `match` opener for `ity` sits on an elided line.)
63 attr::SignedInt(IntTy::I8) | attr::UnsignedInt(UintTy::U8) => I8,
64 attr::SignedInt(IntTy::I16) | attr::UnsignedInt(UintTy::U16) => I16,
65 attr::SignedInt(IntTy::I32) | attr::UnsignedInt(UintTy::U32) => I32,
66 attr::SignedInt(IntTy::I64) | attr::UnsignedInt(UintTy::U64) => I64,
67 attr::SignedInt(IntTy::I128) | attr::UnsignedInt(UintTy::U128) => I128,
68 attr::SignedInt(IntTy::Isize) | attr::UnsignedInt(UintTy::Usize) => {
69 dl.ptr_sized_integer()
74 /// Find the appropriate Integer type and signedness for the given
75 /// signed discriminant range and #[repr] attribute.
76 /// N.B.: u128 values above i128::MAX will be treated as signed, but
77 /// that shouldn't affect anything, other than maybe debuginfo.
78 fn repr_discr<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
// Compute the smallest integer that can hold [min, max] in both an
// unsigned and a signed interpretation; the `#[repr]` hint (if any)
// then decides which interpretation is used.
84 // Theoretically, negative values could be larger in unsigned representation
85 // than the unsigned representation of the signed minimum. However, if there
86 // are any negative values, the only valid unsigned representation is u128
87 // which can fit all i128 values, so the result remains unaffected.
88 let unsigned_fit = Integer::fit_unsigned(cmp::max(min as u128, max as u128));
89 let signed_fit = cmp::max(Integer::fit_signed(min), Integer::fit_signed(max));
91 let mut min_from_extern = None;
// An explicit `#[repr(intN)]` hint wins outright — but it is a
// compiler bug (not a user error) if the hinted width cannot hold the
// discriminant range, because typeck already validated the values.
94 if let Some(ity) = repr.int {
95 let discr = Integer::from_attr(&tcx, ity);
96 let fit = if ity.is_signed() { signed_fit } else { unsigned_fit };
98 bug!("Integer::repr_discr: `#[repr]` hint too small for \
99 discriminant range of enum `{}", ty)
101 return (discr, ity.is_signed());
// Presumably the elided condition here is the `#[repr(C)]` path, which
// imposes a platform-ABI minimum tag size — TODO confirm against the
// elided line(s) between the hint check and this match.
105 match &tcx.sess.target.target.arch[..] {
106 // WARNING: the ARM EABI has two variants; the one corresponding
107 // to `at_least == I32` appears to be used on Linux and NetBSD,
108 // but some systems may use the variant corresponding to no
109 // lower bound. However, we don't run on those yet...?
110 "arm" => min_from_extern = Some(I32),
// NOTE(review): the "arm" arm and the catch-all currently assign the
// same value (I32); the split is kept deliberately so the ARM-specific
// caveat above has somewhere to hang.
111 _ => min_from_extern = Some(I32),
115 let at_least = min_from_extern.unwrap_or(min_default);
117 // If there are no negative values, we can use the unsigned fit.
// (The `min >= 0` test itself is on an elided line.)
119 (cmp::max(unsigned_fit, at_least), false)
121 (cmp::max(signed_fit, at_least), true)
// Extension trait adding `Ty` conversion to the target-abi `Primitive` type.
// (Closing brace is on an elided line.)
126 pub trait PrimitiveExt {
127 fn to_ty<'a, 'tcx>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>) -> Ty<'tcx>;
130 impl PrimitiveExt for Primitive {
// Convert an ABI primitive back into a Rust type: integers via
// `IntegerExt::to_ty`, floats directly, and `Pointer` is represented as
// `*mut ()` (the `match` opener on `*self` sits on an elided line).
131 fn to_ty<'a, 'tcx>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>) -> Ty<'tcx> {
133 Int(i, signed) => i.to_ty(tcx, signed),
134 Float(FloatTy::F32) => tcx.types.f32,
135 Float(FloatTy::F64) => tcx.types.f64,
136 Pointer => tcx.mk_mut_ptr(tcx.mk_unit()),
// Field indices into a fat (wide) pointer's two-word layout; used wherever a
// fat pointer's components are addressed via `FieldPlacement`.
141 /// The first half of a fat pointer.
143 /// - For a trait object, this is the address of the box.
144 /// - For a slice, this is the base address.
145 pub const FAT_PTR_ADDR: usize = 0;
147 /// The second half of a fat pointer.
149 /// - For a trait object, this is the address of the vtable.
150 /// - For a slice, this is the length.
151 pub const FAT_PTR_EXTRA: usize = 1;
// Reasons a layout computation can fail. An `Unknown(Ty)` variant is declared
// on an elided line (it is matched in the `Display` impl below); `SizeOverflow`
// means the computed size exceeded the target's object-size bound.
153 #[derive(Copy, Clone, Debug, RustcEncodable, RustcDecodable)]
154 pub enum LayoutError<'tcx> {
156 SizeOverflow(Ty<'tcx>)
// User-facing rendering of layout failures (the `match` opener on `*self`
// is on an elided line).
159 impl<'tcx> fmt::Display for LayoutError<'tcx> {
160 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
162 LayoutError::Unknown(ty) => {
163 write!(f, "the type `{:?}` has an unknown layout", ty)
165 LayoutError::SizeOverflow(ty) => {
166 write!(f, "the type `{:?}` is too big for the current architecture", ty)
// Provider for the `layout_raw` query: guards against infinitely recursive
// layouts via the session recursion limit, bumps `layout_depth` in the
// thread-local `ImplicitCtxt`, and delegates to `LayoutCx::layout_raw_uncached`.
172 fn layout_raw<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
173 query: ty::ParamEnvAnd<'tcx, Ty<'tcx>>)
174 -> Result<&'tcx LayoutDetails, LayoutError<'tcx>>
176 ty::tls::with_related_context(tcx, move |icx| {
177 let rec_limit = *tcx.sess.recursion_limit.get();
178 let (param_env, ty) = query.into_parts();
// Exceeding the recursion limit aborts compilation; the fatal-error call
// itself is on an elided line, only its message argument is visible.
180 if icx.layout_depth > rec_limit {
182 &format!("overflow representing the type `{}`", ty));
185 // Update the ImplicitCtxt to increase the layout_depth
186 let icx = ty::tls::ImplicitCtxt {
187 layout_depth: icx.layout_depth + 1,
// Run the actual computation with the deepened context installed.
191 ty::tls::enter_context(&icx, |_| {
192 let cx = LayoutCx { tcx, param_env };
193 cx.layout_raw_uncached(ty)
// Register this module's query providers (the `layout_raw` entry and the
// struct-update tail are on elided lines).
198 pub fn provide(providers: &mut ty::query::Providers<'_>) {
199 *providers = ty::query::Providers {
// Context bundle for layout computation: generic over `C` (instantiated with
// `TyCtxt` below) plus the parameter environment used for normalization and
// sizedness queries. A `tcx` field is declared on an elided line.
205 pub struct LayoutCx<'tcx, C> {
207 pub param_env: ty::ParamEnv<'tcx>
210 impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> {
// Core layout computation: given a fully-normalized, monomorphic `ty`, build
// and intern its `LayoutDetails`. This is the uncached body behind the
// `layout_raw` query.
//
// NOTE(review): this listing elides many lines (the internal numbering is
// non-contiguous) — match openers, closing braces, struct-literal tails and
// some arms are missing. Comments below annotate only what is visible.
211 fn layout_raw_uncached(&self, ty: Ty<'tcx>)
212 -> Result<&'tcx LayoutDetails, LayoutError<'tcx>> {
214 let param_env = self.param_env;
215 let dl = self.data_layout();
// Helper: a `Scalar` for a primitive with its full valid range
// (0 ..= 2^bits - 1, computed by the shift below).
216 let scalar_unit = |value: Primitive| {
217 let bits = value.size(dl).bits();
218 assert!(bits <= 128);
221 valid_range: 0..=(!0 >> (128 - bits))
// Helper: intern a single-scalar layout.
224 let scalar = |value: Primitive| {
225 tcx.intern_layout(LayoutDetails::scalar(self, scalar_unit(value)))
// Helper: build the canonical two-scalar layout — `b` is placed after `a`
// at `a`'s size rounded up to `b`'s alignment, and the whole is aligned to
// the max of both (and the aggregate alignment).
227 let scalar_pair = |a: Scalar, b: Scalar| {
228 let align = a.value.align(dl).max(b.value.align(dl)).max(dl.aggregate_align);
229 let b_offset = a.value.size(dl).abi_align(b.value.align(dl));
230 let size = (b_offset + b.value.size(dl)).abi_align(align);
232 variants: Variants::Single { index: 0 },
233 fields: FieldPlacement::Arbitrary {
234 offsets: vec![Size::ZERO, b_offset],
235 memory_index: vec![0, 1]
237 abi: Abi::ScalarPair(a, b),
// Local enum classifying how a univariant body may be laid out; the
// `enum StructKind` header and the first two variant names are on elided
// lines, only their doc comments and the `Prefixed` variant are visible.
243 #[derive(Copy, Clone, Debug)]
245 /// A tuple, closure, or univariant which cannot be coerced to unsized.
247 /// A univariant, the last field of which may be coerced to unsized.
249 /// A univariant, but with a prefix of an arbitrary size & alignment (e.g. enum tag).
250 Prefixed(Size, Align),
// Workhorse closure: lay out a single variant's fields (optionally
// reordering them for density), producing an uninterned `LayoutDetails`.
253 let univariant_uninterned = |fields: &[TyLayout<'_>], repr: &ReprOptions, kind| {
254 let packed = repr.packed();
// `#[repr(packed)]` and `#[repr(align)]` are mutually exclusive;
// typeck should have rejected the combination already.
255 if packed && repr.align > 0 {
256 bug!("struct cannot be packed and aligned");
260 let pack = repr.pack as u64;
261 Align::from_bytes(pack, pack).unwrap()
264 let mut align = if packed {
270 let mut sized = true;
271 let mut offsets = vec![Size::ZERO; fields.len()];
272 let mut inverse_memory_index: Vec<u32> = (0..fields.len() as u32).collect();
// Field reordering is allowed unless the repr forbids it; a prefixed
// layout only reorders when the prefix alignment is 1 (otherwise the
// prefix padding calculation would be disturbed).
274 let mut optimize = !repr.inhibit_struct_field_reordering_opt();
275 if let StructKind::Prefixed(_, align) = kind {
276 optimize &= align.abi() == 1;
// A maybe-unsized struct must keep its last field last, so only the
// prefix of the index array is eligible for sorting.
280 let end = if let StructKind::MaybeUnsized = kind {
285 let optimizing = &mut inverse_memory_index[..end];
286 let field_align = |f: &TyLayout<'_>| {
287 if packed { f.align.min(pack).abi() } else { f.align.abi() }
290 StructKind::AlwaysSized |
291 StructKind::MaybeUnsized => {
292 optimizing.sort_by_key(|&x| {
293 // Place ZSTs first to avoid "interesting offsets",
294 // especially with only one or two non-ZST fields.
295 let f = &fields[x as usize];
296 (!f.is_zst(), cmp::Reverse(field_align(f)))
// For a prefixed layout, sort ascending by alignment so low-align
// fields pack tightly against the prefix.
299 StructKind::Prefixed(..) => {
300 optimizing.sort_by_key(|&x| field_align(&fields[x as usize]));
305 // inverse_memory_index holds field indices by increasing memory offset.
306 // That is, if field 5 has offset 0, the first element of inverse_memory_index is 5.
307 // We now write field offsets to the corresponding offset slot;
308 // field 5 with offset 0 puts 0 in offsets[5].
309 // At the bottom of this function, we use inverse_memory_index to produce memory_index.
311 let mut offset = Size::ZERO;
313 if let StructKind::Prefixed(prefix_size, prefix_align) = kind {
// Packed layouts clamp the prefix alignment to the pack value.
315 let prefix_align = prefix_align.min(pack);
316 align = align.max(prefix_align);
318 align = align.max(prefix_align);
320 offset = prefix_size.abi_align(prefix_align);
// Walk fields in memory order, assigning offsets and accumulating
// size/alignment.
323 for &i in &inverse_memory_index {
324 let field = fields[i as usize];
// Only the last field may be unsized; anything after one is a bug.
326 bug!("univariant: field #{} of `{}` comes after unsized field",
330 if field.is_unsized() {
334 // Invariant: offset < dl.obj_size_bound() <= 1<<61
336 let field_pack = field.align.min(pack);
337 offset = offset.abi_align(field_pack);
338 align = align.max(field_pack);
341 offset = offset.abi_align(field.align);
342 align = align.max(field.align);
345 debug!("univariant offset: {:?} field: {:#?}", offset, field);
346 offsets[i as usize] = offset;
348 offset = offset.checked_add(field.size, dl)
349 .ok_or(LayoutError::SizeOverflow(ty))?;
// `#[repr(align(N))]` can only raise the final alignment.
353 let repr_align = repr.align as u64;
354 align = align.max(Align::from_bytes(repr_align, repr_align).unwrap());
355 debug!("univariant repr_align: {:?}", repr_align);
358 debug!("univariant min_size: {:?}", offset);
359 let min_size = offset;
361 // As stated above, inverse_memory_index holds field indices by increasing offset.
362 // This makes it an already-sorted view of the offsets vec.
363 // To invert it, consider:
364 // If field 5 has offset 0, offsets[0] is 5, and memory_index[5] should be 0.
365 // Field 5 would be the first element, so memory_index is i:
366 // Note: if we didn't optimize, it's already right.
368 let mut memory_index;
370 memory_index = vec![0; inverse_memory_index.len()];
372 for i in 0..inverse_memory_index.len() {
373 memory_index[inverse_memory_index[i] as usize] = i as u32;
376 memory_index = inverse_memory_index;
379 let size = min_size.abi_align(align);
380 let mut abi = Abi::Aggregate { sized };
382 // Unpack newtype ABIs and find scalar pairs.
383 if sized && size.bytes() > 0 {
384 // All other fields must be ZSTs, and we need them to all start at 0.
385 let mut zst_offsets =
386 offsets.iter().enumerate().filter(|&(i, _)| fields[i].is_zst());
387 if zst_offsets.all(|(_, o)| o.bytes() == 0) {
388 let mut non_zst_fields =
389 fields.iter().enumerate().filter(|&(_, f)| !f.is_zst());
391 match (non_zst_fields.next(), non_zst_fields.next(), non_zst_fields.next()) {
392 // We have exactly one non-ZST field.
393 (Some((i, field)), None, None) => {
394 // Field fills the struct and it has a scalar or scalar pair ABI.
395 if offsets[i].bytes() == 0 &&
396 align.abi() == field.align.abi() &&
399 // For plain scalars, or vectors of them, we can't unpack
400 // newtypes for `#[repr(C)]`, as that affects C ABIs.
401 Abi::Scalar(_) | Abi::Vector { .. } if optimize => {
402 abi = field.abi.clone();
404 // But scalar pairs are Rust-specific and get
405 // treated as aggregates by C ABIs anyway.
406 Abi::ScalarPair(..) => {
407 abi = field.abi.clone();
414 // Two non-ZST fields, and they're both scalars.
415 (Some((i, &TyLayout {
416 details: &LayoutDetails { abi: Abi::Scalar(ref a), .. }, ..
417 })), Some((j, &TyLayout {
418 details: &LayoutDetails { abi: Abi::Scalar(ref b), .. }, ..
420 // Order by the memory placement, not source order.
421 let ((i, a), (j, b)) = if offsets[i] < offsets[j] {
426 let pair = scalar_pair(a.clone(), b.clone());
427 let pair_offsets = match pair.fields {
428 FieldPlacement::Arbitrary {
432 assert_eq!(memory_index, &[0, 1]);
437 if offsets[i] == pair_offsets[0] &&
438 offsets[j] == pair_offsets[1] &&
439 align == pair.align &&
441 // We can use `ScalarPair` only when it matches our
442 // already computed layout (including `#[repr(C)]`).
// Any uninhabited field makes the whole aggregate uninhabited.
452 if sized && fields.iter().any(|f| f.abi.is_uninhabited()) {
453 abi = Abi::Uninhabited;
// Final `LayoutDetails` struct literal (tail fields elided).
457 variants: Variants::Single { index: 0 },
458 fields: FieldPlacement::Arbitrary {
// Interned convenience wrapper around `univariant_uninterned`.
467 let univariant = |fields: &[TyLayout<'_>], repr: &ReprOptions, kind| {
468 Ok(tcx.intern_layout(univariant_uninterned(fields, repr, kind)?))
470 debug_assert!(!ty.has_infer_types());
// Main dispatch on the type's kind (the `match ty.sty` opener and several
// arm headers — bool, char, int/uint, never, str, unit, tuple — are on
// elided lines; arms below are identified by their bodies).
// bool: an I8 scalar whose valid range (0..=1, on an elided line) feeds
// enum-niche optimizations.
475 tcx.intern_layout(LayoutDetails::scalar(self, Scalar {
476 value: Int(I8, false),
// char: an I32 scalar restricted to valid Unicode scalar values.
481 tcx.intern_layout(LayoutDetails::scalar(self, Scalar {
482 value: Int(I32, false),
483 valid_range: 0..=0x10FFFF
487 scalar(Int(Integer::from_attr(dl, attr::SignedInt(ity)), true))
490 scalar(Int(Integer::from_attr(dl, attr::UnsignedInt(ity)), false))
492 ty::Float(fty) => scalar(Float(fty)),
// fn pointers are non-null pointers: narrow the valid range to 1..=max.
494 let mut ptr = scalar_unit(Pointer);
495 ptr.valid_range = 1..=*ptr.valid_range.end();
496 tcx.intern_layout(LayoutDetails::scalar(self, ptr))
// The never type: zero fields, uninhabited ABI.
501 tcx.intern_layout(LayoutDetails {
502 variants: Variants::Single { index: 0 },
503 fields: FieldPlacement::Union(0),
504 abi: Abi::Uninhabited,
510 // Potentially-fat pointers.
511 ty::Ref(_, pointee, _) |
512 ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
513 let mut data_ptr = scalar_unit(Pointer);
// References (unlike raw pointers) are non-null.
514 if !ty.is_unsafe_ptr() {
515 data_ptr.valid_range = 1..=*data_ptr.valid_range.end();
518 let pointee = tcx.normalize_erasing_regions(param_env, pointee);
519 if pointee.is_sized(tcx.at(DUMMY_SP), param_env) {
520 return Ok(tcx.intern_layout(LayoutDetails::scalar(self, data_ptr)));
// Unsized pointee: pick the metadata scalar by the unsized tail.
523 let unsized_part = tcx.struct_tail(pointee);
524 let metadata = match unsized_part.sty {
526 return Ok(tcx.intern_layout(LayoutDetails::scalar(self, data_ptr)));
528 ty::Slice(_) | ty::Str => {
529 scalar_unit(Int(dl.ptr_sized_integer(), false))
// Trait objects carry a non-null vtable pointer as metadata.
532 let mut vtable = scalar_unit(Pointer);
533 vtable.valid_range = 1..=*vtable.valid_range.end();
536 _ => return Err(LayoutError::Unknown(unsized_part))
539 // Effectively a (ptr, meta) tuple.
540 tcx.intern_layout(scalar_pair(data_ptr, metadata))
543 // Arrays and slices.
544 ty::Array(element, mut count) => {
545 if count.has_projections() {
546 count = tcx.normalize_erasing_regions(param_env, count);
547 if count.has_projections() {
548 return Err(LayoutError::Unknown(ty));
552 let element = self.layout_of(element)?;
553 let count = count.unwrap_usize(tcx);
554 let size = element.size.checked_mul(count, dl)
555 .ok_or(LayoutError::SizeOverflow(ty))?;
557 tcx.intern_layout(LayoutDetails {
558 variants: Variants::Single { index: 0 },
559 fields: FieldPlacement::Array {
560 stride: element.size,
563 abi: Abi::Aggregate { sized: true },
564 align: element.align,
568 ty::Slice(element) => {
569 let element = self.layout_of(element)?;
570 tcx.intern_layout(LayoutDetails {
571 variants: Variants::Single { index: 0 },
572 fields: FieldPlacement::Array {
573 stride: element.size,
576 abi: Abi::Aggregate { sized: false },
577 align: element.align,
// str: an unsized byte array with stride 1 (arm header elided).
582 tcx.intern_layout(LayoutDetails {
583 variants: Variants::Single { index: 0 },
584 fields: FieldPlacement::Array {
585 stride: Size::from_bytes(1),
588 abi: Abi::Aggregate { sized: false },
// Odd unit-like cases (arm header elided): an empty univariant layout.
596 univariant(&[], &ReprOptions::default(), StructKind::AlwaysSized)?
598 ty::Dynamic(..) | ty::Foreign(..) => {
// Extern/dyn types: like a unit struct but forced unsized.
599 let mut unit = univariant_uninterned(&[], &ReprOptions::default(),
600 StructKind::AlwaysSized)?;
602 Abi::Aggregate { ref mut sized } => *sized = false,
605 tcx.intern_layout(unit)
608 // Tuples, generators and closures.
609 ty::Generator(def_id, ref substs, _) => {
610 let tys = substs.field_tys(def_id, tcx);
611 univariant(&tys.map(|ty| self.layout_of(ty)).collect::<Result<Vec<_>, _>>()?,
612 &ReprOptions::default(),
613 StructKind::AlwaysSized)?
616 ty::Closure(def_id, ref substs) => {
617 let tys = substs.upvar_tys(def_id, tcx);
618 univariant(&tys.map(|ty| self.layout_of(ty)).collect::<Result<Vec<_>, _>>()?,
619 &ReprOptions::default(),
620 StructKind::AlwaysSized)?
// Tuples (arm header elided): the last field may be unsized, so
// non-empty tuples are laid out as MaybeUnsized.
624 let kind = if tys.len() == 0 {
625 StructKind::AlwaysSized
627 StructKind::MaybeUnsized
630 univariant(&tys.iter().map(|ty| self.layout_of(ty)).collect::<Result<Vec<_>, _>>()?,
631 &ReprOptions::default(), kind)?
634 // SIMD vector types.
635 ty::Adt(def, ..) if def.repr.simd() => {
636 let element = self.layout_of(ty.simd_type(tcx))?;
637 let count = ty.simd_size(tcx) as u64;
// SIMD elements must themselves be machine scalars.
639 let scalar = match element.abi {
640 Abi::Scalar(ref scalar) => scalar.clone(),
642 tcx.sess.fatal(&format!("monomorphising SIMD type `{}` with \
643 a non-machine element type `{}`",
647 let size = element.size.checked_mul(count, dl)
648 .ok_or(LayoutError::SizeOverflow(ty))?;
649 let align = dl.vector_align(size);
650 let size = size.abi_align(align);
652 tcx.intern_layout(LayoutDetails {
653 variants: Variants::Single { index: 0 },
654 fields: FieldPlacement::Array {
655 stride: element.size,
// ADTs (structs, enums, unions) — the long arm.
668 ty::Adt(def, substs) => {
669 // Cache the field layouts.
670 let variants = def.variants.iter().map(|v| {
671 v.fields.iter().map(|field| {
672 self.layout_of(field.ty(tcx, substs))
673 }).collect::<Result<Vec<_>, _>>()
674 }).collect::<Result<Vec<_>, _>>()?;
// Union handling (the `if def.is_union()` guard is elided): size is the
// max of field sizes, alignment the max of field alignments.
677 let packed = def.repr.packed();
678 if packed && def.repr.align > 0 {
679 bug!("Union cannot be packed and aligned");
683 let pack = def.repr.pack as u64;
684 Align::from_bytes(pack, pack).unwrap()
687 let mut align = if packed {
693 if def.repr.align > 0 {
694 let repr_align = def.repr.align as u64;
696 Align::from_bytes(repr_align, repr_align).unwrap());
699 let mut size = Size::ZERO;
700 for field in &variants[0] {
701 assert!(!field.is_unsized());
704 let field_pack = field.align.min(pack);
705 align = align.max(field_pack);
707 align = align.max(field.align);
709 size = cmp::max(size, field.size);
712 return Ok(tcx.intern_layout(LayoutDetails {
713 variants: Variants::Single { index: 0 },
714 fields: FieldPlacement::Union(variants[0].len()),
715 abi: Abi::Aggregate { sized: true },
717 size: size.abi_align(align)
721 // A variant is absent if it's uninhabited and only has ZST fields.
722 // Present uninhabited variants only require space for their fields,
723 // but *not* an encoding of the discriminant (e.g. a tag value).
724 // See issue #49298 for more details on the need to leave space
725 // for non-ZST uninhabited data (mostly partial initialization).
726 let absent = |fields: &[TyLayout<'_>]| {
727 let uninhabited = fields.iter().any(|f| f.abi.is_uninhabited());
728 let is_zst = fields.iter().all(|f| f.is_zst());
729 uninhabited && is_zst
731 let (present_first, present_second) = {
732 let mut present_variants = (0..variants.len()).filter(|&v| {
733 !absent(&variants[v])
735 (present_variants.next(), present_variants.next())
737 if present_first.is_none() {
738 // Uninhabited because it has no variants, or only absent ones.
739 return tcx.layout_raw(param_env.and(tcx.types.never));
742 let is_struct = !def.is_enum() ||
743 // Only one variant is present.
744 (present_second.is_none() &&
745 // Representation optimizations are allowed.
746 !def.repr.inhibit_enum_layout_opt());
748 // Struct, or univariant enum equivalent to a struct.
749 // (Typechecking will reject discriminant-sizing attrs.)
751 let v = present_first.unwrap();
752 let kind = if def.is_enum() || variants[v].len() == 0 {
753 StructKind::AlwaysSized
755 let param_env = tcx.param_env(def.did);
756 let last_field = def.variants[v].fields.last().unwrap();
757 let always_sized = tcx.type_of(last_field.did)
758 .is_sized(tcx.at(DUMMY_SP), param_env);
759 if !always_sized { StructKind::MaybeUnsized }
760 else { StructKind::AlwaysSized }
763 let mut st = univariant_uninterned(&variants[v], &def.repr, kind)?;
764 st.variants = Variants::Single { index: v };
// `#[rustc_layout_scalar_valid_range_*]` (used by NonNull/NonZero):
// narrow the scalar's valid range, never widen it.
765 let (start, end) = self.tcx.layout_scalar_valid_range(def.did);
767 Abi::Scalar(ref mut scalar) |
768 Abi::ScalarPair(ref mut scalar, _) => {
769 // the asserts ensure that we are not using the
770 // `#[rustc_layout_scalar_valid_range(n)]`
771 // attribute to widen the range of anything as that would probably
772 // result in UB somewhere
773 if let Bound::Included(start) = start {
774 assert!(*scalar.valid_range.start() <= start);
775 scalar.valid_range = start..=*scalar.valid_range.end();
777 if let Bound::Included(end) = end {
778 assert!(*scalar.valid_range.end() >= end);
779 scalar.valid_range = *scalar.valid_range.start()..=end;
783 start == Bound::Unbounded && end == Bound::Unbounded,
784 "nonscalar layout for layout_scalar_valid_range type {:?}: {:#?}",
789 return Ok(tcx.intern_layout(st));
792 // The current code for niche-filling relies on variant indices
793 // instead of actual discriminants, so dataful enums with
794 // explicit discriminants (RFC #2363) would misbehave.
795 let no_explicit_discriminants = def.variants.iter().enumerate()
796 .all(|(i, v)| v.discr == ty::VariantDiscr::Relative(i));
798 // Niche-filling enum optimization.
799 if !def.repr.inhibit_enum_layout_opt() && no_explicit_discriminants {
800 let mut dataful_variant = None;
// Inverted empty range (max..=0) as the initial "no niche variants" state.
801 let mut niche_variants = usize::max_value()..=0;
803 // Find one non-ZST variant.
804 'variants: for (v, fields) in variants.iter().enumerate() {
// (Absent-variant skip and the non-ZST test are on elided lines.)
// More than one dataful variant kills the optimization.
810 if dataful_variant.is_none() {
811 dataful_variant = Some(v);
814 dataful_variant = None;
819 niche_variants = *niche_variants.start().min(&v)..=v;
822 if niche_variants.start() > niche_variants.end() {
823 dataful_variant = None;
826 if let Some(i) = dataful_variant {
827 let count = (niche_variants.end() - niche_variants.start() + 1) as u128;
// Scan the dataful variant's fields for a niche big enough to
// encode all the other (fieldless) variants.
828 for (field_index, &field) in variants[i].iter().enumerate() {
829 let niche = match self.find_niche(field)? {
830 Some(niche) => niche,
833 let (niche_start, niche_scalar) = match niche.reserve(self, count) {
838 let mut align = dl.aggregate_align;
839 let st = variants.iter().enumerate().map(|(j, v)| {
840 let mut st = univariant_uninterned(v,
841 &def.repr, StructKind::AlwaysSized)?;
842 st.variants = Variants::Single { index: j };
844 align = align.max(st.align);
847 }).collect::<Result<Vec<_>, _>>()?;
849 let offset = st[i].fields.offset(field_index) + niche.offset;
850 let size = st[i].size;
852 let mut abi = match st[i].abi {
853 Abi::Scalar(_) => Abi::Scalar(niche_scalar.clone()),
854 Abi::ScalarPair(ref first, ref second) => {
855 // We need to use scalar_unit to reset the
856 // valid range to the maximal one for that
857 // primitive, because only the niche is
858 // guaranteed to be initialised, not the
860 if offset.bytes() == 0 {
862 niche_scalar.clone(),
863 scalar_unit(second.value),
867 scalar_unit(first.value),
868 niche_scalar.clone(),
872 _ => Abi::Aggregate { sized: true },
875 if st.iter().all(|v| v.abi.is_uninhabited()) {
876 abi = Abi::Uninhabited;
879 return Ok(tcx.intern_layout(LayoutDetails {
880 variants: Variants::NicheFilling {
887 fields: FieldPlacement::Arbitrary {
888 offsets: vec![offset],
889 memory_index: vec![0]
// Fallback: classic tagged-enum layout. First compute the signed
// discriminant range over the inhabited variants.
899 let (mut min, mut max) = (i128::max_value(), i128::min_value());
900 let discr_type = def.repr.discr_type();
901 let bits = Integer::from_attr(self, discr_type).size().bits();
902 for (i, discr) in def.discriminants(tcx).enumerate() {
903 if variants[i].iter().any(|f| f.abi.is_uninhabited()) {
906 let mut x = discr.val as i128;
907 if discr_type.is_signed() {
908 // sign extend the raw representation to be an i128
909 x = (x << (128 - bits)) >> (128 - bits);
911 if x < min { min = x; }
912 if x > max { max = x; }
914 // We might have no inhabited variants, so pretend there's at least one.
915 if (min, max) == (i128::max_value(), i128::min_value()) {
919 assert!(min <= max, "discriminant range is {}...{}", min, max);
920 let (min_ity, signed) = Integer::repr_discr(tcx, ty, &def.repr, min, max);
922 let mut align = dl.aggregate_align;
923 let mut size = Size::ZERO;
925 // We're interested in the smallest alignment, so start large.
926 let mut start_align = Align::from_bytes(256, 256).unwrap();
927 assert_eq!(Integer::for_abi_align(dl, start_align), None);
929 // repr(C) on an enum tells us to make a (tag, union) layout,
930 // so we need to grow the prefix alignment to be at least
931 // the alignment of the union. (This value is used both for
932 // determining the alignment of the overall enum, and the
933 // determining the alignment of the payload after the tag.)
934 let mut prefix_align = min_ity.align(dl);
// (The repr(C) guard for this loop is on an elided line.)
936 for fields in &variants {
937 for field in fields {
938 prefix_align = prefix_align.max(field.align);
943 // Create the set of structs that represent each variant.
944 let mut layout_variants = variants.iter().enumerate().map(|(i, field_layouts)| {
945 let mut st = univariant_uninterned(&field_layouts,
946 &def.repr, StructKind::Prefixed(min_ity.size(), prefix_align))?;
947 st.variants = Variants::Single { index: i };
948 // Find the first field we can't move later
949 // to make room for a larger discriminant.
950 for field in st.fields.index_by_increasing_offset().map(|j| field_layouts[j]) {
951 if !field.is_zst() || field.align.abi() != 1 {
952 start_align = start_align.min(field.align);
956 size = cmp::max(size, st.size);
957 align = align.max(st.align);
959 }).collect::<Result<Vec<_>, _>>()?;
961 // Align the maximum variant size to the largest alignment.
962 size = size.abi_align(align);
964 if size.bytes() >= dl.obj_size_bound() {
965 return Err(LayoutError::SizeOverflow(ty));
968 let typeck_ity = Integer::from_attr(dl, def.repr.discr_type());
969 if typeck_ity < min_ity {
970 // It is a bug if Layout decided on a greater discriminant size than typeck for
971 // some reason at this point (based on values discriminant can take on). Mostly
972 // because this discriminant will be loaded, and then stored into variable of
973 // type calculated by typeck. Consider such case (a bug): typeck decided on
974 // byte-sized discriminant, but layout thinks we need a 16-bit to store all
975 // discriminant values. That would be a bug, because then, in codegen, in order
976 // to store this 16-bit discriminant into 8-bit sized temporary some of the
977 // space necessary to represent would have to be discarded (or layout is wrong
978 // on thinking it needs 16 bits)
979 bug!("layout decided on a larger discriminant type ({:?}) than typeck ({:?})",
980 min_ity, typeck_ity);
981 // However, it is fine to make discr type however large (as an optimisation)
982 // after this point – we’ll just truncate the value we load in codegen.
985 // Check to see if we should use a different type for the
986 // discriminant. We can safely use a type with the same size
987 // as the alignment of the first field of each variant.
988 // We increase the size of the discriminant to avoid LLVM copying
989 // padding when it doesn't need to. This normally causes unaligned
990 // load/stores and excessive memcpy/memset operations. By using a
991 // bigger integer size, LLVM can be sure about its contents and
992 // won't be so conservative.
994 // Use the initial field alignment
995 let mut ity = if def.repr.c() || def.repr.int.is_some() {
998 Integer::for_abi_align(dl, start_align).unwrap_or(min_ity)
1001 // If the alignment is not larger than the chosen discriminant size,
1002 // don't use the alignment as the final size.
1006 // Patch up the variants' first few fields.
1007 let old_ity_size = min_ity.size();
1008 let new_ity_size = ity.size();
1009 for variant in &mut layout_variants {
1010 match variant.fields {
1011 FieldPlacement::Arbitrary { ref mut offsets, .. } => {
// Only offsets that coincide with the old tag size need moving
// (the per-offset loop header is elided).
1013 if *i <= old_ity_size {
1014 assert_eq!(*i, old_ity_size);
1018 // We might be making the struct larger.
1019 if variant.size <= old_ity_size {
1020 variant.size = new_ity_size;
// Build the tag scalar; the valid range is the discriminant range
// truncated to the tag width.
1028 let tag_mask = !0u128 >> (128 - ity.size().bits());
1030 value: Int(ity, signed),
1031 valid_range: (min as u128 & tag_mask)..=(max as u128 & tag_mask),
1033 let mut abi = Abi::Aggregate { sized: true };
1034 if tag.value.size(dl) == size {
1035 abi = Abi::Scalar(tag.clone());
1037 // Try to use a ScalarPair for all tagged enums.
1038 let mut common_prim = None;
1039 for (field_layouts, layout_variant) in variants.iter().zip(&layout_variants) {
1040 let offsets = match layout_variant.fields {
1041 FieldPlacement::Arbitrary { ref offsets, .. } => offsets,
1044 let mut fields = field_layouts
1047 .filter(|p| !p.0.is_zst());
1048 let (field, offset) = match (fields.next(), fields.next()) {
1049 (None, None) => continue,
1050 (Some(pair), None) => pair,
// (>1 non-ZST field arm, which bails out, is elided.)
1056 let prim = match field.details.abi {
1057 Abi::Scalar(ref scalar) => scalar.value,
1063 if let Some(pair) = common_prim {
1064 // This is pretty conservative. We could go fancier
1065 // by conflating things like i32 and u32, or even
1066 // realising that (u8, u8) could just cohabit with
1068 if pair != (prim, offset) {
1073 common_prim = Some((prim, offset));
1076 if let Some((prim, offset)) = common_prim {
1077 let pair = scalar_pair(tag.clone(), scalar_unit(prim));
1078 let pair_offsets = match pair.fields {
1079 FieldPlacement::Arbitrary {
1083 assert_eq!(memory_index, &[0, 1]);
1088 if pair_offsets[0] == Size::ZERO &&
1089 pair_offsets[1] == *offset &&
1090 align == pair.align &&
1092 // We can use `ScalarPair` only when it matches our
1093 // already computed layout (including `#[repr(C)]`).
1099 if layout_variants.iter().all(|v| v.abi.is_uninhabited()) {
1100 abi = Abi::Uninhabited;
1103 tcx.intern_layout(LayoutDetails {
1104 variants: Variants::Tagged {
1106 variants: layout_variants,
1108 fields: FieldPlacement::Arbitrary {
1109 offsets: vec![Size::ZERO],
1110 memory_index: vec![0]
1118 // Types with no meaningful known layout.
1119 ty::Projection(_) | ty::Opaque(..) => {
1120 let normalized = tcx.normalize_erasing_regions(param_env, ty);
1121 if ty == normalized {
1122 return Err(LayoutError::Unknown(ty));
1124 tcx.layout_raw(param_env.and(normalized))?
// Kinds that should never reach layout (ty::Bound/Infer arms elided).
1128 ty::UnnormalizedProjection(..) |
1129 ty::GeneratorWitness(..) |
1131 bug!("LayoutDetails::compute: unexpected type `{}`", ty)
1134 ty::Param(_) | ty::Error => {
1135 return Err(LayoutError::Unknown(ty));
1140 /// This is invoked by the `layout_raw` query to record the final
1141 /// layout of each type.
1143 fn record_layout_for_printing(&self, layout: TyLayout<'tcx>) {
1144 // If we are running with `-Zprint-type-sizes`, record layouts for
1145 // dumping later. Ignore layouts that are done with non-empty
1146 // environments or non-monomorphic layouts, as the user only wants
1147 // to see the stuff resulting from the final codegen session.
// The `if`/`return` wrapping this disjunction sits on elided lines; any
// of these conditions skips recording.
1149 !self.tcx.sess.opts.debugging_opts.print_type_sizes ||
1150 layout.ty.has_param_types() ||
1151 layout.ty.has_self_ty() ||
1152 !self.param_env.caller_bounds.is_empty()
// Delegate the actual bookkeeping so this hot-path check stays small.
1157 self.record_layout_for_printing_outlined(layout)
// Collect `-Zprint-type-sizes` data for one layout: per-variant and per-field
// names, offsets, sizes and alignments, pushed into the session's `code_stats`.
// NOTE(review): several interior lines are elided in this listing.
1160 fn record_layout_for_printing_outlined(&self, layout: TyLayout<'tcx>) {
1161 // (delay format until we actually need it)
1162 let record = |kind, packed, opt_discr_size, variants| {
1163 let type_desc = format!("{:?}", layout.ty);
1164 self.tcx.sess.code_stats.borrow_mut().record_type_size(kind,
// Only nominal ADTs get the full per-variant treatment; closures are
// recorded without field detail, everything else is skipped.
1173 let adt_def = match layout.ty.sty {
1174 ty::Adt(ref adt_def, _) => {
1175 debug!("print-type-size t: `{:?}` process adt", layout.ty);
1179 ty::Closure(..) => {
1180 debug!("print-type-size t: `{:?}` record closure", layout.ty);
1181 record(DataTypeKind::Closure, false, None, vec![]);
1186 debug!("print-type-size t: `{:?}` skip non-nominal", layout.ty);
1191 let adt_kind = adt_def.adt_kind();
1192 let adt_packed = adt_def.repr.packed();
// Build the per-variant report: walk each named field, recording its
// offset/size/align and tracking the furthest field end as `min_size`.
1194 let build_variant_info = |n: Option<ast::Name>,
1196 layout: TyLayout<'tcx>| {
1197 let mut min_size = Size::ZERO;
1198 let field_info: Vec<_> = flds.iter().enumerate().map(|(i, &name)| {
1199 match layout.field(self, i) {
// A field layout failure here is a compiler bug: the enclosing
// layout was already computed successfully.
1201 bug!("no layout found for field {}: `{:?}`", name, err);
1203 Ok(field_layout) => {
1204 let offset = layout.fields.offset(i);
1205 let field_end = offset + field_layout.size;
1206 if min_size < field_end {
1207 min_size = field_end;
1209 session::FieldInfo {
1210 name: name.to_string(),
1211 offset: offset.bytes(),
1212 size: field_layout.size.bytes(),
1213 align: field_layout.align.abi(),
1219 session::VariantInfo {
1220 name: n.map(|n|n.to_string()),
// Unsized variants can only report a lower bound on their size.
1221 kind: if layout.is_unsized() {
1222 session::SizeKind::Min
1224 session::SizeKind::Exact
1226 align: layout.align.abi(),
1227 size: if min_size.bytes() == 0 {
1236 match layout.variants {
1237 Variants::Single { index } => {
1238 debug!("print-type-size `{:#?}` variant {}",
1239 layout, adt_def.variants[index].name)
1240 if !adt_def.variants.is_empty() {
1241 let variant_def = &adt_def.variants[index];
1242 let fields: Vec<_> =
1243 variant_def.fields.iter().map(|f| f.ident.name).collect();
1244 record(adt_kind.into(),
1247 vec![build_variant_info(Some(variant_def.name),
1251 // (This case arises for *empty* enums; so give it
1253 record(adt_kind.into(), adt_packed, None, vec![]);
1257 Variants::NicheFilling { .. } |
1258 Variants::Tagged { .. } => {
1259 debug!("print-type-size `{:#?}` adt general variants def {}",
1260 layout.ty, adt_def.variants.len());
1261 let variant_infos: Vec<_> =
1262 adt_def.variants.iter().enumerate().map(|(i, variant_def)| {
1263 let fields: Vec<_> =
1264 variant_def.fields.iter().map(|f| f.ident.name).collect();
1265 build_variant_info(Some(variant_def.name),
1267 layout.for_variant(self, i))
// Only tagged enums report a discriminant size; niche-filling
// encodes the discriminant inside a field's niche.
1270 record(adt_kind.into(), adt_packed, match layout.variants {
1271 Variants::Tagged { ref tag, .. } => Some(tag.value.size(self)),
1279 /// Type size "skeleton", i.e. the only information determining a type's size.
1280 /// While this is conservative, (aside from constant sizes, only pointers,
1281 /// newtypes thereof and null pointer optimized enums are allowed), it is
1282 /// enough to statically check common usecases of transmute.
1283 #[derive(Copy, Clone, Debug)]
1284 pub enum SizeSkeleton<'tcx> {
1285 /// Any statically computable Layout.
// NOTE(review): the variant declarations themselves (e.g. `Known(Size)`
// and the `Pointer { non_zero, tail }` struct variant implied by the
// uses in `compute` below) are elided from this excerpt.
1288 /// A potentially-fat pointer.
1290 /// If true, this pointer is never null.
1292 /// The type which determines the unsized metadata, if any,
1293 /// of this pointer. Either a type parameter or a projection
1294 /// depending on one, with regions erased.
1299 impl<'a, 'tcx> SizeSkeleton<'tcx> {
// Computes a size skeleton for `ty`: first tries a full static layout
// (yielding `Known`), then falls back to the structural cases that are
// still decidable when layout errors (pointers to type-parameter tails,
// newtypes, nullable-pointer enums, and normalizable projections).
// NOTE(review): interior lines are elided from this excerpt; error arms
// and some closing braces are not visible.
1300 pub fn compute(ty: Ty<'tcx>,
1301 tcx: TyCtxt<'a, 'tcx, 'tcx>,
1302 param_env: ty::ParamEnv<'tcx>)
1303 -> Result<SizeSkeleton<'tcx>, LayoutError<'tcx>> {
// Inference variables must have been resolved by the caller.
1304 debug_assert!(!ty.has_infer_types());
1306 // First try computing a static layout.
1307 let err = match tcx.layout_of(param_env.and(ty)) {
1309 return Ok(SizeSkeleton::Known(layout.size));
// References and raw pointers: the skeleton is `Pointer`, non-null for
// safe references, with the unsized tail (param/projection) recorded.
1315 ty::Ref(_, pointee, _) |
1316 ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
1317 let non_zero = !ty.is_unsafe_ptr();
1318 let tail = tcx.struct_tail(pointee);
1320 ty::Param(_) | ty::Projection(_) => {
1321 debug_assert!(tail.has_param_types() || tail.has_self_ty());
1322 Ok(SizeSkeleton::Pointer {
// Regions are irrelevant to size, so erase them from the tail.
1324 tail: tcx.erase_regions(&tail)
// Layout only errors for generic tails; anything else is a bug.
1328 bug!("SizeSkeleton::compute({}): layout errored ({}), yet \
1329 tail `{}` is not a type parameter or a projection",
1335 ty::Adt(def, substs) => {
1336 // Only newtypes and enums w/ nullable pointer optimization.
1337 if def.is_union() || def.variants.is_empty() || def.variants.len() > 2 {
1341 // Get a zero-sized variant or a pointer newtype.
1342 let zero_or_ptr_variant = |i: usize| {
1343 let fields = def.variants[i].fields.iter().map(|field| {
1344 SizeSkeleton::compute(field.ty(tcx, substs), tcx, param_env)
1347 for field in fields {
1350 SizeSkeleton::Known(size) => {
1351 if size.bytes() > 0 {
1355 SizeSkeleton::Pointer {..} => {
1366 let v0 = zero_or_ptr_variant(0)?;
// Newtype case: a single variant wrapping exactly one pointer.
1368 if def.variants.len() == 1 {
1369 if let Some(SizeSkeleton::Pointer { non_zero, tail }) = v0 {
1370 return Ok(SizeSkeleton::Pointer {
// A scalar_valid_range attribute excluding zero (e.g. NonNull/NonZero
// wrappers) upgrades the pointer to non-null.
1371 non_zero: non_zero || match tcx.layout_scalar_valid_range(def.did) {
1372 (Bound::Included(start), Bound::Unbounded) => start > 0,
1373 (Bound::Included(start), Bound::Included(end)) =>
1374 0 < start && start < end,
1384 let v1 = zero_or_ptr_variant(1)?;
1385 // Nullable pointer enum optimization.
// Two variants where one is a non-null pointer and the other is
// zero-sized collapse to a plain (nullable) pointer skeleton.
1387 (Some(SizeSkeleton::Pointer { non_zero: true, tail }), None) |
1388 (None, Some(SizeSkeleton::Pointer { non_zero: true, tail })) => {
1389 Ok(SizeSkeleton::Pointer {
// Projections/opaques: normalize and retry, unless normalization made
// no progress (which would recurse forever).
1398 ty::Projection(_) | ty::Opaque(..) => {
1399 let normalized = tcx.normalize_erasing_regions(param_env, ty);
1400 if ty == normalized {
1403 SizeSkeleton::compute(normalized, tcx, param_env)
// Two skeletons guarantee the same size iff both are `Known` and equal,
// or both are pointers (regardless of non_zero) with equal tails.
1411 pub fn same_size(self, other: SizeSkeleton<'_>) -> bool {
1412 match (self, other) {
1413 (SizeSkeleton::Known(a), SizeSkeleton::Known(b)) => a == b,
1414 (SizeSkeleton::Pointer { tail: a, .. },
1415 SizeSkeleton::Pointer { tail: b, .. }) => a == b,
// Extends `HasDataLayout` with access to the global `TyCtxt`, letting
// generic layout code query the type context.
1421 pub trait HasTyCtxt<'tcx>: HasDataLayout {
1422 fn tcx<'a>(&'a self) -> TyCtxt<'a, 'tcx, 'tcx>;
// A `TyCtxt` carries the target data layout directly (body elided in
// this excerpt).
1425 impl<'a, 'gcx, 'tcx> HasDataLayout for TyCtxt<'a, 'gcx, 'tcx> {
1426 fn data_layout(&self) -> &TargetDataLayout {
// A `TyCtxt` trivially provides itself, rebased to the global lifetime
// (body elided in this excerpt).
1431 impl<'a, 'gcx, 'tcx> HasTyCtxt<'gcx> for TyCtxt<'a, 'gcx, 'tcx> {
1432 fn tcx<'b>(&'b self) -> TyCtxt<'b, 'gcx, 'gcx> {
// `LayoutCx` forwards data-layout access to its wrapped context.
1437 impl<'tcx, T: HasDataLayout> HasDataLayout for LayoutCx<'tcx, T> {
1438 fn data_layout(&self) -> &TargetDataLayout {
1439 self.tcx.data_layout()
// `LayoutCx` forwards `tcx` access to its wrapped context (body elided
// in this excerpt).
1443 impl<'gcx, 'tcx, T: HasTyCtxt<'gcx>> HasTyCtxt<'gcx> for LayoutCx<'tcx, T> {
1444 fn tcx<'b>(&'b self) -> TyCtxt<'b, 'gcx, 'gcx> {
// Abstracts over "a T" vs "a Result<T, E>", so layout code can be
// generic over whether `layout_of` is fallible.
1449 pub trait MaybeResult<T> {
1450 fn from_ok(x: T) -> Self;
1451 fn map_same<F: FnOnce(T) -> T>(self, f: F) -> Self;
// Infallible case: the value is carried directly (bodies partially
// elided in this excerpt).
1454 impl<T> MaybeResult<T> for T {
1455 fn from_ok(x: T) -> Self {
1458 fn map_same<F: FnOnce(T) -> T>(self, f: F) -> Self {
// Fallible case: `from_ok` wraps in `Ok`, `map_same` presumably maps the
// Ok value (bodies elided in this excerpt).
1463 impl<T, E> MaybeResult<T> for Result<T, E> {
1464 fn from_ok(x: T) -> Self {
1467 fn map_same<F: FnOnce(T) -> T>(self, f: F) -> Self {
// Crate-local alias instantiating the generic target-ABI `TyLayout`
// with this crate's `Ty<'tcx>`.
1472 pub type TyLayout<'tcx> = ::rustc_target::abi::TyLayout<'tcx, Ty<'tcx>>;
1474 impl<'a, 'tcx> LayoutOf for LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> {
1476 type TyLayout = Result<TyLayout<'tcx>, LayoutError<'tcx>>;
1478 /// Computes the layout of a type. Note that this implicitly
1479 /// executes in "reveal all" mode.
// Normalizes `ty` (erasing regions, revealing opaques), runs the
// `layout_raw` query, wraps the result in a `TyLayout`, and finally
// records it for -Zprint-type-sizes. Some lines (e.g. the TyLayout
// field initializers) are elided from this excerpt.
1480 fn layout_of(&self, ty: Ty<'tcx>) -> Self::TyLayout {
1481 let param_env = self.param_env.with_reveal_all();
1482 let ty = self.tcx.normalize_erasing_regions(param_env, ty);
1483 let details = self.tcx.layout_raw(param_env.and(ty))?;
1484 let layout = TyLayout {
1489 // NB: This recording is normally disabled; when enabled, it
1490 // can however trigger recursive invocations of `layout_of`.
1491 // Therefore, we execute it *after* the main query has
1492 // completed, to avoid problems around recursive structures
1493 // and the like. (Admittedly, I wasn't able to reproduce a problem
1494 // here, but it seems like the right thing to do. -nmatsakis)
1495 self.record_layout_for_printing(layout);
// Same as the `TyCtxt`-based impl above, but for a span-carrying
// `TyCtxtAt` context; it rebuilds a plain LayoutCx (construction lines
// elided in this excerpt) before recording type sizes.
1501 impl<'a, 'tcx> LayoutOf for LayoutCx<'tcx, ty::query::TyCtxtAt<'a, 'tcx, 'tcx>> {
1503 type TyLayout = Result<TyLayout<'tcx>, LayoutError<'tcx>>;
1505 /// Computes the layout of a type. Note that this implicitly
1506 /// executes in "reveal all" mode.
1507 fn layout_of(&self, ty: Ty<'tcx>) -> Self::TyLayout {
1508 let param_env = self.param_env.with_reveal_all();
1509 let ty = self.tcx.normalize_erasing_regions(param_env, ty);
1510 let details = self.tcx.layout_raw(param_env.and(ty))?;
1511 let layout = TyLayout {
1516 // NB: This recording is normally disabled; when enabled, it
1517 // can however trigger recursive invocations of `layout_of`.
1518 // Therefore, we execute it *after* the main query has
1519 // completed, to avoid problems around recursive structures
1520 // and the like. (Admittedly, I wasn't able to reproduce a problem
1521 // here, but it seems like the right thing to do. -nmatsakis)
1524 param_env: self.param_env
1526 cx.record_layout_for_printing(layout);
1532 // Helper (inherent) `layout_of` methods to avoid pushing `LayoutCx` to users.
1533 impl TyCtxt<'a, 'tcx, '_> {
1534 /// Computes the layout of a type. Note that this implicitly
1535 /// executes in "reveal all" mode.
// Builds a LayoutCx over the global tcx (constructor line elided in
// this excerpt) and delegates to the trait `layout_of` above.
1537 pub fn layout_of(self, param_env_and_ty: ty::ParamEnvAnd<'tcx, Ty<'tcx>>)
1538 -> Result<TyLayout<'tcx>, LayoutError<'tcx>> {
1540 tcx: self.global_tcx(),
1541 param_env: param_env_and_ty.param_env
1543 cx.layout_of(param_env_and_ty.value)
// Span-carrying twin of `TyCtxt::layout_of` above: builds a LayoutCx
// over `global_tcx().at(span)` so query errors point at the right span.
1547 impl ty::query::TyCtxtAt<'a, 'tcx, '_> {
1548 /// Computes the layout of a type. Note that this implicitly
1549 /// executes in "reveal all" mode.
1551 pub fn layout_of(self, param_env_and_ty: ty::ParamEnvAnd<'tcx, Ty<'tcx>>)
1552 -> Result<TyLayout<'tcx>, LayoutError<'tcx>> {
1554 tcx: self.global_tcx().at(self.span),
1555 param_env: param_env_and_ty.param_env
1557 cx.layout_of(param_env_and_ty.value)
// Implements variant projection and field-type lookup for `Ty`-based
// layouts, generic over any context providing `layout_of` and a tcx.
// NOTE(review): many interior lines (match-arm endings, some field
// initializers) are elided from this excerpt.
1561 impl<'a, 'tcx, C> TyLayoutMethods<'tcx, C> for Ty<'tcx>
1562 where C: LayoutOf<Ty = Ty<'tcx>> + HasTyCtxt<'tcx>,
1563 C::TyLayout: MaybeResult<TyLayout<'tcx>>
// Projects `this` down to the layout of one variant. For uninhabited
// variants an artificial uninhabited-union layout is synthesized.
1565 fn for_variant(this: TyLayout<'tcx>, cx: &C, variant_index: usize) -> TyLayout<'tcx> {
1566 let details = match this.variants {
// Already the requested single variant: reuse the details as-is.
1567 Variants::Single { index } if index == variant_index => this.details,
1569 Variants::Single { index } => {
1570 // Deny calling for_variant more than once for non-Single enums.
1571 cx.layout_of(this.ty).map_same(|layout| {
1572 assert_eq!(layout.variants, Variants::Single { index });
// Synthesize a zero-size, uninhabited layout for the absent variant,
// with one union "field" per declared field of the variant.
1576 let fields = match this.ty.sty {
1577 ty::Adt(def, _) => def.variants[variant_index].fields.len(),
1581 tcx.intern_layout(LayoutDetails {
1582 variants: Variants::Single { index: variant_index },
1583 fields: FieldPlacement::Union(fields),
1584 abi: Abi::Uninhabited,
1585 align: tcx.data_layout.i8_align,
// Multi-variant layouts store per-variant details; just index them.
1590 Variants::NicheFilling { ref variants, .. } |
1591 Variants::Tagged { ref variants, .. } => {
1592 &variants[variant_index]
1596 assert_eq!(details.variants, Variants::Single { index: variant_index });
// Returns the layout of field `i` of `this`, dispatching on the type to
// find the field's type and recursing through `cx.layout_of`.
1604 fn field(this: TyLayout<'tcx>, cx: &C, i: usize) -> C::TyLayout {
1606 cx.layout_of(match this.ty.sty {
// Types with no fields at all: asking for a field is a bug.
1615 ty::GeneratorWitness(..) |
1617 ty::Dynamic(..) => {
1618 bug!("TyLayout::field_type({:?}): not applicable", this)
1621 // Potentially-fat pointers.
1622 ty::Ref(_, pointee, _) |
1623 ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
1624 assert!(i < this.fields.count());
1626 // Reuse the fat *T type as its own thin pointer data field.
1627 // This provides information about e.g. DST struct pointees
1628 // (which may have no non-DST form), and will work as long
1629 // as the `Abi` or `FieldPlacement` is checked by users.
1631 let nil = tcx.mk_unit();
1632 let ptr_ty = if this.ty.is_unsafe_ptr() {
1635 tcx.mk_mut_ref(tcx.types.re_static, nil)
// Borrow the unit-pointer layout but report it under the original type.
1637 return cx.layout_of(ptr_ty).map_same(|mut ptr_layout| {
1638 ptr_layout.ty = this.ty;
// Field 1 of a fat pointer is its metadata: usize for slices/str,
// a vtable pointer for trait objects.
1643 match tcx.struct_tail(pointee).sty {
1645 ty::Str => tcx.types.usize,
1646 ty::Dynamic(_, _) => {
1648 tcx.types.re_static,
// The vtable is modeled as &[usize; 3] (drop fn, size, align).
1649 tcx.mk_array(tcx.types.usize, 3),
1651 /* FIXME use actual fn pointers
1652 Warning: naively computing the number of entries in the
1653 vtable by counting the methods on the trait + methods on
1654 all parent traits does not work, because some methods can
1655 be not object safe and thus excluded from the vtable.
1656 Increase this counter if you tried to implement this but
1657 failed to do it without duplicating a lot of code from
1658 other places in the compiler: 2
1660 tcx.mk_array(tcx.types.usize, 3),
1661 tcx.mk_array(Option<fn()>),
1665 _ => bug!("TyLayout::field_type({:?}): not applicable", this)
1669 // Arrays and slices.
1670 ty::Array(element, _) |
1671 ty::Slice(element) => element,
1672 ty::Str => tcx.types.u8,
1674 // Tuples, generators and closures.
1675 ty::Closure(def_id, ref substs) => {
1676 substs.upvar_tys(def_id, tcx).nth(i).unwrap()
1679 ty::Generator(def_id, ref substs, _) => {
1680 substs.field_tys(def_id, tcx).nth(i).unwrap()
1683 ty::Tuple(tys) => tys[i],
1685 // SIMD vector types.
1686 ty::Adt(def, ..) if def.repr.simd() => {
1687 this.ty.simd_type(tcx)
1691 ty::Adt(def, substs) => {
1692 match this.variants {
1693 Variants::Single { index } => {
1694 def.variants[index].fields[i].ty(tcx, substs)
1697 // Discriminant field for enums (where applicable).
1698 Variants::Tagged { tag: ref discr, .. } |
1699 Variants::NicheFilling { niche: ref discr, .. } => {
// The tag has no Ty of its own; build a scalar layout for it directly
// instead of recursing through `cx.layout_of`.
1701 let layout = LayoutDetails::scalar(cx, discr.clone());
1702 return MaybeResult::from_ok(TyLayout {
1703 details: tcx.intern_layout(layout),
1704 ty: discr.value.to_ty(tcx)
// Types that should have been normalized away before layout.
1710 ty::Projection(_) | ty::UnnormalizedProjection(..) | ty::Bound(..) |
1711 ty::Opaque(..) | ty::Param(_) | ty::Infer(_) | ty::Error => {
1712 bug!("TyLayout::field_type: unexpected type `{}`", this.ty)
// Reserves `count` consecutive values immediately past the scalar's
// current valid range (wrapping within the scalar's bit width) and
// returns the first reserved value plus the widened Scalar.
// NOTE(review): the enclosing impl header and the `count` parameter
// declaration are elided from this excerpt — presumably this is a
// method on `Niche` taking `&self` and `count: u128`; confirm.
1725 fn reserve<'a, 'tcx>(
1727 cx: &LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>>,
1729 ) -> Option<(u128, Scalar)> {
// Not enough spare values in the niche (the None return is elided).
1730 if count > self.available {
1733 let Scalar { value, valid_range: ref v } = self.scalar;
1734 let bits = value.size(cx).bits();
1735 assert!(bits <= 128);
// Mask keeping only the scalar's low `bits` bits.
1736 let max_value = !0u128 >> (128 - bits);
// New values start right after the current end of the valid range and
// wrap modulo 2^bits.
1737 let start = v.end().wrapping_add(1) & max_value;
1738 let end = v.end().wrapping_add(count) & max_value;
1739 Some((start, Scalar { value, valid_range: *v.start()..=end }))
1743 impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> {
1744 /// Find the offset of a niche leaf field, starting from
1745 /// the given type and recursing through aggregates.
1746 // FIXME(eddyb) traverse already optimized enums.
// Returns Ok(None) when no usable niche exists. Several interior lines
// (early returns, some closing braces) are elided from this excerpt.
1747 fn find_niche(&self, layout: TyLayout<'tcx>) -> Result<Option<Niche>, LayoutError<'tcx>> {
// Turn a scalar at `offset` into a Niche if it has spare (invalid)
// values; returns None (via the elided branch) otherwise.
1748 let scalar_niche = |scalar: &Scalar, offset| {
1749 let Scalar { value, valid_range: ref v } = *scalar;
1751 let bits = value.size(self).bits();
1752 assert!(bits <= 128);
1753 let max_value = !0u128 >> (128 - bits);
1755 // Find out how many values are outside the valid range.
// Valid ranges may wrap around (start > end), hence the two cases.
1756 let available = if v.start() <= v.end() {
1757 v.start() + (max_value - v.end())
1759 v.start() - v.end() - 1
1762 // Give up if there is no niche value available.
1767 Some(Niche { offset, scalar: scalar.clone(), available })
1770 // Locals variables which live across yields are stored
1771 // in the generator type as fields. These may be uninitialized
1772 // so we don't look for niches there.
1773 if let ty::Generator(..) = layout.ty.sty {
// Fast paths: the ABI itself exposes a scalar we can niche into.
1778 Abi::Scalar(ref scalar) => {
1779 return Ok(scalar_niche(scalar, Size::ZERO));
1781 Abi::ScalarPair(ref a, ref b) => {
1782 // HACK(nox): We iter on `b` and then `a` because `max_by_key`
1783 // returns the last maximum.
// `b` sits after `a`, aligned to `b`'s own alignment.
1784 let niche = iter::once((b, a.value.size(self).abi_align(b.value.align(self))))
1785 .chain(iter::once((a, Size::ZERO)))
1786 .filter_map(|(scalar, offset)| scalar_niche(scalar, offset))
1787 .max_by_key(|niche| niche.available);
1790 Abi::Vector { ref element, .. } => {
1791 return Ok(scalar_niche(element, Size::ZERO));
1796 // Perhaps one of the fields is non-zero, let's recurse and find out.
1797 if let FieldPlacement::Union(_) = layout.fields {
1798 // Only Rust enums have safe-to-inspect fields
1799 // (a discriminant), other unions are unsafe.
1800 if let Variants::Single { .. } = layout.variants {
// Arrays: all elements share a layout, so inspecting element 0 suffices.
1804 if let FieldPlacement::Array { .. } = layout.fields {
1805 if layout.fields.count() > 0 {
1806 return self.find_niche(layout.field(self, 0)?);
// Structs/tuples: recurse into every field and keep the niche with the
// most available values, shifting its offset by the field's offset.
1811 let mut niche = None;
1812 let mut available = 0;
1813 for i in 0..layout.fields.count() {
1814 if let Some(mut c) = self.find_niche(layout.field(self, i)?)? {
1815 if c.available > available {
1816 available = c.available;
1817 c.offset += layout.fields.offset(i);
// Stable (incremental-compilation) hashing for `Variants`: hash the
// discriminant, then each variant's payload fields. NicheFilling arm
// header and some match braces are elided from this excerpt.
1826 impl<'a> HashStable<StableHashingContext<'a>> for Variants {
1827 fn hash_stable<W: StableHasherResult>(&self,
1828 hcx: &mut StableHashingContext<'a>,
1829 hasher: &mut StableHasher<W>) {
1830 use ty::layout::Variants::*;
1831 mem::discriminant(self).hash_stable(hcx, hasher);
1834 Single { index } => {
1835 index.hash_stable(hcx, hasher);
1841 tag.hash_stable(hcx, hasher);
1842 variants.hash_stable(hcx, hasher);
1851 dataful_variant.hash_stable(hcx, hasher);
1852 niche_variants.start().hash_stable(hcx, hasher);
1853 niche_variants.end().hash_stable(hcx, hasher);
1854 niche.hash_stable(hcx, hasher);
1855 niche_start.hash_stable(hcx, hasher);
1856 variants.hash_stable(hcx, hasher);
// Stable hashing for `FieldPlacement`: discriminant plus each variant's
// payload (the Union arm header is elided from this excerpt).
1862 impl<'a> HashStable<StableHashingContext<'a>> for FieldPlacement {
1863 fn hash_stable<W: StableHasherResult>(&self,
1864 hcx: &mut StableHashingContext<'a>,
1865 hasher: &mut StableHasher<W>) {
1866 use ty::layout::FieldPlacement::*;
1867 mem::discriminant(self).hash_stable(hcx, hasher);
1871 count.hash_stable(hcx, hasher);
1873 Array { count, stride } => {
1874 count.hash_stable(hcx, hasher);
1875 stride.hash_stable(hcx, hasher);
1877 Arbitrary { ref offsets, ref memory_index } => {
1878 offsets.hash_stable(hcx, hasher);
1879 memory_index.hash_stable(hcx, hasher);
// Stable hashing for `Abi`: discriminant plus variant payloads
// (the payload-less Uninhabited arm is elided from this excerpt).
1885 impl<'a> HashStable<StableHashingContext<'a>> for Abi {
1886 fn hash_stable<W: StableHasherResult>(&self,
1887 hcx: &mut StableHashingContext<'a>,
1888 hasher: &mut StableHasher<W>) {
1889 use ty::layout::Abi::*;
1890 mem::discriminant(self).hash_stable(hcx, hasher);
1894 Scalar(ref value) => {
1895 value.hash_stable(hcx, hasher);
1897 ScalarPair(ref a, ref b) => {
1898 a.hash_stable(hcx, hasher);
1899 b.hash_stable(hcx, hasher);
1901 Vector { ref element, count } => {
1902 element.hash_stable(hcx, hasher);
1903 count.hash_stable(hcx, hasher);
1905 Aggregate { sized } => {
1906 sized.hash_stable(hcx, hasher);
// Stable hashing for `Scalar`: hash the primitive value plus both ends
// of the (possibly wrapping) inclusive valid range.
1912 impl<'a> HashStable<StableHashingContext<'a>> for Scalar {
1913 fn hash_stable<W: StableHasherResult>(&self,
1914 hcx: &mut StableHashingContext<'a>,
1915 hasher: &mut StableHasher<W>) {
1916 let Scalar { value, ref valid_range } = *self;
1917 value.hash_stable(hcx, hasher);
1918 valid_range.start().hash_stable(hcx, hasher);
1919 valid_range.end().hash_stable(hcx, hasher);
// Macro-generated HashStable impls for LayoutDetails, Integer and
// Primitive. The field/variant lists inside each invocation are largely
// elided from this excerpt.
1923 impl_stable_hash_for!(struct ::ty::layout::LayoutDetails {
1931 impl_stable_hash_for!(enum ::ty::layout::Integer {
1939 impl_stable_hash_for!(enum ::ty::layout::Primitive {
1940 Int(integer, signed),
// Stable hashing for `Align`: both the ABI-required and the preferred
// alignment contribute to the hash.
1945 impl<'gcx> HashStable<StableHashingContext<'gcx>> for Align {
1946 fn hash_stable<W: StableHasherResult>(&self,
1947 hcx: &mut StableHashingContext<'gcx>,
1948 hasher: &mut StableHasher<W>) {
1949 self.abi().hash_stable(hcx, hasher);
1950 self.pref().hash_stable(hcx, hasher);
// Stable hashing for `Size`: the byte count fully determines it.
1954 impl<'gcx> HashStable<StableHashingContext<'gcx>> for Size {
1955 fn hash_stable<W: StableHasherResult>(&self,
1956 hcx: &mut StableHashingContext<'gcx>,
1957 hasher: &mut StableHasher<W>) {
1958 self.bytes().hash_stable(hcx, hasher);
1962 impl<'a, 'gcx> HashStable<StableHashingContext<'a>> for LayoutError<'gcx>
1964 fn hash_stable<W: StableHasherResult>(&self,
1965 hcx: &mut StableHashingContext<'a>,
1966 hasher: &mut StableHasher<W>) {
1967 use ty::layout::LayoutError::*;
1968 mem::discriminant(self).hash_stable(hcx, hasher);
1972 SizeOverflow(t) => t.hash_stable(hcx, hasher)