// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

use session::{self, DataTypeKind};
use ty::{self, Ty, TyCtxt, TypeFoldable, ReprOptions};

use syntax::ast::{self, IntTy, UintTy};
use syntax::attr;
use syntax_pos::DUMMY_SP;

use std::cmp;
use std::fmt;
use std::i128;
use std::iter;
use std::mem;
use std::ops::Bound;

use ich::StableHashingContext;
use rustc_data_structures::indexed_vec::{IndexVec, Idx};
use rustc_data_structures::stable_hasher::{HashStable, StableHasher,
                                           StableHasherResult};

pub use rustc_target::abi::*;

pub trait IntegerExt {
    fn to_ty<'a, 'tcx>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, signed: bool) -> Ty<'tcx>;
    fn from_attr<C: HasDataLayout>(cx: &C, ity: attr::IntType) -> Integer;
    fn repr_discr<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
                            ty: Ty<'tcx>,
                            repr: &ReprOptions,
                            min: i128,
                            max: i128)
                            -> (Integer, bool);
}

impl IntegerExt for Integer {
    fn to_ty<'a, 'tcx>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, signed: bool) -> Ty<'tcx> {
        match (*self, signed) {
            (I8, false) => tcx.types.u8,
            (I16, false) => tcx.types.u16,
            (I32, false) => tcx.types.u32,
            (I64, false) => tcx.types.u64,
            (I128, false) => tcx.types.u128,
            (I8, true) => tcx.types.i8,
            (I16, true) => tcx.types.i16,
            (I32, true) => tcx.types.i32,
            (I64, true) => tcx.types.i64,
            (I128, true) => tcx.types.i128,
        }
    }

    /// Gets the `Integer` type from an `attr::IntType`.
    fn from_attr<C: HasDataLayout>(cx: &C, ity: attr::IntType) -> Integer {
        let dl = cx.data_layout();

        match ity {
            attr::SignedInt(IntTy::I8) | attr::UnsignedInt(UintTy::U8) => I8,
            attr::SignedInt(IntTy::I16) | attr::UnsignedInt(UintTy::U16) => I16,
            attr::SignedInt(IntTy::I32) | attr::UnsignedInt(UintTy::U32) => I32,
            attr::SignedInt(IntTy::I64) | attr::UnsignedInt(UintTy::U64) => I64,
            attr::SignedInt(IntTy::I128) | attr::UnsignedInt(UintTy::U128) => I128,
            attr::SignedInt(IntTy::Isize) | attr::UnsignedInt(UintTy::Usize) => {
                dl.ptr_sized_integer()
            }
        }
    }

    /// Finds the appropriate `Integer` type and signedness for the given
    /// signed discriminant range and `#[repr]` attribute.
    /// N.B.: `u128` values above `i128::MAX` will be treated as signed, but
    /// that shouldn't affect anything, other than maybe debuginfo.
    fn repr_discr<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
                            ty: Ty<'tcx>,
                            repr: &ReprOptions,
                            min: i128,
                            max: i128)
                            -> (Integer, bool) {
        // Theoretically, negative values could be larger in unsigned representation
        // than the unsigned representation of the signed minimum. However, if there
        // are any negative values, the only valid unsigned representation is u128
        // which can fit all i128 values, so the result remains unaffected.
        let unsigned_fit = Integer::fit_unsigned(cmp::max(min as u128, max as u128));
        let signed_fit = cmp::max(Integer::fit_signed(min), Integer::fit_signed(max));
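        // For example, a range of -1..=100 has `min as u128 == u128::MAX`,
        // so `unsigned_fit` is `I128` while `signed_fit` is only `I8`.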

        let mut min_from_extern = None;
        let min_default = I8;

        if let Some(ity) = repr.int {
            let discr = Integer::from_attr(&tcx, ity);
            let fit = if ity.is_signed() { signed_fit } else { unsigned_fit };
            if discr < fit {
                bug!("Integer::repr_discr: `#[repr]` hint too small for \
                      discriminant range of enum `{}`", ty)
            }
            return (discr, ity.is_signed());
        }

        if repr.c() {
            match &tcx.sess.target.target.arch[..] {
                // WARNING: the ARM EABI has two variants; the one corresponding
                // to `at_least == I32` appears to be used on Linux and NetBSD,
                // but some systems may use the variant corresponding to no
                // lower bound. However, we don't run on those yet...?
                "arm" => min_from_extern = Some(I32),
                _ => min_from_extern = Some(I32),
            }
        }

        let at_least = min_from_extern.unwrap_or(min_default);

        // If there are no negative values, we can use the unsigned fit.
        if min >= 0 {
            (cmp::max(unsigned_fit, at_least), false)
        } else {
            (cmp::max(signed_fit, at_least), true)
        }
    }
}

pub trait PrimitiveExt {
    fn to_ty<'a, 'tcx>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>) -> Ty<'tcx>;
}

impl PrimitiveExt for Primitive {
    fn to_ty<'a, 'tcx>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>) -> Ty<'tcx> {
        match *self {
            Int(i, signed) => i.to_ty(tcx, signed),
            Float(FloatTy::F32) => tcx.types.f32,
            Float(FloatTy::F64) => tcx.types.f64,
            Pointer => tcx.mk_mut_ptr(tcx.mk_unit()),
        }
    }
}

/// The first half of a fat pointer.
///
/// - For a trait object, this is the address of the box.
/// - For a slice, this is the base address.
pub const FAT_PTR_ADDR: usize = 0;

/// The second half of a fat pointer.
///
/// - For a trait object, this is the address of the vtable.
/// - For a slice, this is the length.
pub const FAT_PTR_EXTRA: usize = 1;

#[derive(Copy, Clone, Debug, RustcEncodable, RustcDecodable)]
pub enum LayoutError<'tcx> {
    Unknown(Ty<'tcx>),
    SizeOverflow(Ty<'tcx>)
}

impl<'tcx> fmt::Display for LayoutError<'tcx> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match *self {
            LayoutError::Unknown(ty) => {
                write!(f, "the type `{:?}` has an unknown layout", ty)
            }
            LayoutError::SizeOverflow(ty) => {
                write!(f, "the type `{:?}` is too big for the current architecture", ty)
            }
        }
    }
}

fn layout_raw<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
                        query: ty::ParamEnvAnd<'tcx, Ty<'tcx>>)
                        -> Result<&'tcx LayoutDetails, LayoutError<'tcx>>
{
    ty::tls::with_related_context(tcx, move |icx| {
        let rec_limit = *tcx.sess.recursion_limit.get();
        let (param_env, ty) = query.into_parts();

        if icx.layout_depth > rec_limit {
            tcx.sess.fatal(
                &format!("overflow representing the type `{}`", ty));
        }

        // Update the ImplicitCtxt to increase the layout_depth
        let icx = ty::tls::ImplicitCtxt {
            layout_depth: icx.layout_depth + 1,
            ..icx.clone()
        };

        ty::tls::enter_context(&icx, |_| {
            let cx = LayoutCx { tcx, param_env };
            cx.layout_raw_uncached(ty)
        })
    })
}

pub fn provide(providers: &mut ty::query::Providers<'_>) {
    *providers = ty::query::Providers {
        layout_raw,
        ..*providers
    };
}

pub struct LayoutCx<'tcx, C> {
    pub tcx: C,
    pub param_env: ty::ParamEnv<'tcx>
}

impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> {
    fn layout_raw_uncached(&self, ty: Ty<'tcx>)
                           -> Result<&'tcx LayoutDetails, LayoutError<'tcx>> {
        let tcx = self.tcx;
        let param_env = self.param_env;
        let dl = self.data_layout();
        let scalar_unit = |value: Primitive| {
            let bits = value.size(dl).bits();
            assert!(bits <= 128);
            Scalar {
                value,
                valid_range: 0..=(!0 >> (128 - bits))
            }
        };
        let scalar = |value: Primitive| {
            tcx.intern_layout(LayoutDetails::scalar(self, scalar_unit(value)))
        };
        let scalar_pair = |a: Scalar, b: Scalar| {
            let b_align = b.value.align(dl);
            let align = a.value.align(dl).max(b_align).max(dl.aggregate_align);
            let b_offset = a.value.size(dl).align_to(b_align.abi);
            let size = (b_offset + b.value.size(dl)).align_to(align.abi);
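            // e.g. pairing a `u8` with a `u32` gives `b_offset == 4`
            // (1 byte aligned up to 4) and a total size of 8 bytes.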
            LayoutDetails {
                variants: Variants::Single { index: VariantIdx::new(0) },
                fields: FieldPlacement::Arbitrary {
                    offsets: vec![Size::ZERO, b_offset],
                    memory_index: vec![0, 1]
                },
                abi: Abi::ScalarPair(a, b),
                align,
                size
            }
        };

        #[derive(Copy, Clone, Debug)]
        enum StructKind {
            /// A tuple, closure, or univariant which cannot be coerced to unsized.
            AlwaysSized,
            /// A univariant, the last field of which may be coerced to unsized.
            MaybeUnsized,
            /// A univariant, but with a prefix of an arbitrary size & alignment (e.g. enum tag).
            Prefixed(Size, Align),
        }

        let univariant_uninterned = |fields: &[TyLayout<'_>], repr: &ReprOptions, kind| {
            let packed = repr.packed();
            if packed && repr.align > 0 {
                bug!("struct cannot be packed and aligned");
            }

            let pack = Align::from_bytes(repr.pack as u64).unwrap();

            let mut align = if packed {
                dl.aggregate_align.min(AbiAndPrefAlign::new(pack))
            } else {
                dl.aggregate_align
            };

            let mut sized = true;
            let mut offsets = vec![Size::ZERO; fields.len()];
            let mut inverse_memory_index: Vec<u32> = (0..fields.len() as u32).collect();
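            // Initially the identity mapping: memory order matches source
            // order until the optimizer below reorders fields.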

            let mut optimize = !repr.inhibit_struct_field_reordering_opt();
            if let StructKind::Prefixed(_, align) = kind {
                optimize &= align.bytes() == 1;
            }

            if optimize {
                let end = if let StructKind::MaybeUnsized = kind {
                    fields.len() - 1
                } else {
                    fields.len()
                };
                let optimizing = &mut inverse_memory_index[..end];
                let field_align = |f: &TyLayout<'_>| {
                    if packed { f.align.abi.min(pack) } else { f.align.abi }
                };
                match kind {
                    StructKind::AlwaysSized |
                    StructKind::MaybeUnsized => {
                        optimizing.sort_by_key(|&x| {
                            // Place ZSTs first to avoid "interesting offsets",
                            // especially with only one or two non-ZST fields.
                            let f = &fields[x as usize];
                            (!f.is_zst(), cmp::Reverse(field_align(f)))
                        });
                    }
                    StructKind::Prefixed(..) => {
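                        // Sort by ascending alignment, presumably so that
                        // low-alignment fields can be placed right after the
                        // aligned prefix with minimal padding.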
                        optimizing.sort_by_key(|&x| field_align(&fields[x as usize]));
                    }
                }
            }

            // inverse_memory_index holds field indices by increasing memory offset.
            // That is, if field 5 has offset 0, the first element of inverse_memory_index is 5.
            // We now write field offsets to the corresponding offset slot;
            // field 5 with offset 0 puts 0 in offsets[5].
            // At the bottom of this function, we use inverse_memory_index to produce memory_index.

            let mut offset = Size::ZERO;

            if let StructKind::Prefixed(prefix_size, prefix_align) = kind {
                let prefix_align = if packed {
                    prefix_align.min(pack)
                } else {
                    prefix_align
                };
                align = align.max(AbiAndPrefAlign::new(prefix_align));
                offset = prefix_size.align_to(prefix_align);
            }

            for &i in &inverse_memory_index {
                let field = fields[i as usize];
                if !sized {
                    bug!("univariant: field #{} of `{}` comes after unsized field",
                         i, ty);
                }

                if field.is_unsized() {
                    sized = false;
                }

                // Invariant: offset < dl.obj_size_bound() <= 1<<61
                let field_align = if packed {
                    field.align.min(AbiAndPrefAlign::new(pack))
                } else {
                    field.align
                };
                offset = offset.align_to(field_align.abi);
                align = align.max(field_align);

                debug!("univariant offset: {:?} field: {:#?}", offset, field);
                offsets[i as usize] = offset;

                offset = offset.checked_add(field.size, dl)
                    .ok_or(LayoutError::SizeOverflow(ty))?;
            }

            if repr.align > 0 {
                let repr_align = repr.align as u64;
                align = align.max(AbiAndPrefAlign::new(Align::from_bytes(repr_align).unwrap()));
                debug!("univariant repr_align: {:?}", repr_align);
            }

            debug!("univariant min_size: {:?}", offset);
            let min_size = offset;

            // As stated above, inverse_memory_index holds field indices by increasing offset.
            // This makes it an already-sorted view of the offsets vec.
            // To invert it, consider:
            // If field 5 has offset 0, offsets[0] is 5, and memory_index[5] should be 0.
            // Field 5 would be the first element, so memory_index is i:
            // Note: if we didn't optimize, it's already right.

            let mut memory_index;
            if optimize {
                memory_index = vec![0; inverse_memory_index.len()];

                for i in 0..inverse_memory_index.len() {
                    memory_index[inverse_memory_index[i] as usize] = i as u32;
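                    // e.g. if `inverse_memory_index` is [2, 0, 1] (field 2 is
                    // first in memory), then `memory_index` becomes [1, 2, 0].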
                }
            } else {
                memory_index = inverse_memory_index;
            }

            let size = min_size.align_to(align.abi);
            let mut abi = Abi::Aggregate { sized };

            // Unpack newtype ABIs and find scalar pairs.
            if sized && size.bytes() > 0 {
                // All other fields must be ZSTs, and we need them to all start at 0.
                let mut zst_offsets =
                    offsets.iter().enumerate().filter(|&(i, _)| fields[i].is_zst());
                if zst_offsets.all(|(_, o)| o.bytes() == 0) {
                    let mut non_zst_fields =
                        fields.iter().enumerate().filter(|&(_, f)| !f.is_zst());

                    match (non_zst_fields.next(), non_zst_fields.next(), non_zst_fields.next()) {
                        // We have exactly one non-ZST field.
                        (Some((i, field)), None, None) => {
                            // Field fills the struct and it has a scalar or scalar pair ABI.
                            if offsets[i].bytes() == 0 &&
                               align.abi == field.align.abi &&
                               size == field.size {
                                match field.abi {
                                    // For plain scalars, or vectors of them, we can't unpack
                                    // newtypes for `#[repr(C)]`, as that affects C ABIs.
                                    Abi::Scalar(_) | Abi::Vector { .. } if optimize => {
                                        abi = field.abi.clone();
                                    }
                                    // But scalar pairs are Rust-specific and get
                                    // treated as aggregates by C ABIs anyway.
                                    Abi::ScalarPair(..) => {
                                        abi = field.abi.clone();
                                    }
                                    _ => {}
                                }
                            }
                        }

                        // Two non-ZST fields, and they're both scalars.
                        (Some((i, &TyLayout {
                            details: &LayoutDetails { abi: Abi::Scalar(ref a), .. }, ..
                        })), Some((j, &TyLayout {
                            details: &LayoutDetails { abi: Abi::Scalar(ref b), .. }, ..
                        })), None) => {
                            // Order by the memory placement, not source order.
                            let ((i, a), (j, b)) = if offsets[i] < offsets[j] {
                                ((i, a), (j, b))
                            } else {
                                ((j, b), (i, a))
                            };
                            let pair = scalar_pair(a.clone(), b.clone());
                            let pair_offsets = match pair.fields {
                                FieldPlacement::Arbitrary {
                                    ref offsets,
                                    ref memory_index
                                } => {
                                    assert_eq!(memory_index, &[0, 1]);
                                    offsets
                                }
                                _ => bug!()
                            };
                            if offsets[i] == pair_offsets[0] &&
                               offsets[j] == pair_offsets[1] &&
                               align == pair.align &&
                               size == pair.size {
                                // We can use `ScalarPair` only when it matches our
                                // already computed layout (including `#[repr(C)]`).
                                abi = pair.abi;
                            }
                        }

                        _ => {}
                    }
                }
            }

            if sized && fields.iter().any(|f| f.abi.is_uninhabited()) {
                abi = Abi::Uninhabited;
            }

            Ok(LayoutDetails {
                variants: Variants::Single { index: VariantIdx::new(0) },
                fields: FieldPlacement::Arbitrary {
                    offsets,
                    memory_index
                },
                abi,
                align,
                size
            })
        };
        let univariant = |fields: &[TyLayout<'_>], repr: &ReprOptions, kind| {
            Ok(tcx.intern_layout(univariant_uninterned(fields, repr, kind)?))
        };
        debug_assert!(!ty.has_infer_types());

        Ok(match ty.sty {
            // Basic scalars.
            ty::Bool => {
                tcx.intern_layout(LayoutDetails::scalar(self, Scalar {
                    value: Int(I8, false),
                    valid_range: 0..=1
                }))
            }
            ty::Char => {
                tcx.intern_layout(LayoutDetails::scalar(self, Scalar {
                    value: Int(I32, false),
                    valid_range: 0..=0x10FFFF
                }))
            }
            ty::Int(ity) => {
                scalar(Int(Integer::from_attr(dl, attr::SignedInt(ity)), true))
            }
            ty::Uint(ity) => {
                scalar(Int(Integer::from_attr(dl, attr::UnsignedInt(ity)), false))
            }
            ty::Float(fty) => scalar(Float(fty)),
            ty::FnPtr(_) => {
                let mut ptr = scalar_unit(Pointer);
                ptr.valid_range = 1..=*ptr.valid_range.end();
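                // Excluding 0 from the valid range makes the pointer
                // non-null, so enum layout can use the null value as a
                // niche (e.g. `Option<fn()>` stays pointer-sized).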
                tcx.intern_layout(LayoutDetails::scalar(self, ptr))
            }

            // The never type.
            ty::Never => {
                tcx.intern_layout(LayoutDetails {
                    variants: Variants::Single { index: VariantIdx::new(0) },
                    fields: FieldPlacement::Union(0),
                    abi: Abi::Uninhabited,
                    align: dl.i8_align,
                    size: Size::ZERO
                })
            }

            // Potentially-fat pointers.
            ty::Ref(_, pointee, _) |
            ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
                let mut data_ptr = scalar_unit(Pointer);
                if !ty.is_unsafe_ptr() {
                    data_ptr.valid_range = 1..=*data_ptr.valid_range.end();
                }

                let pointee = tcx.normalize_erasing_regions(param_env, pointee);
                if pointee.is_sized(tcx.at(DUMMY_SP), param_env) {
                    return Ok(tcx.intern_layout(LayoutDetails::scalar(self, data_ptr)));
                }

                let unsized_part = tcx.struct_tail(pointee);
                let metadata = match unsized_part.sty {
                    ty::Foreign(..) => {
                        return Ok(tcx.intern_layout(LayoutDetails::scalar(self, data_ptr)));
                    }
                    ty::Slice(_) | ty::Str => {
                        scalar_unit(Int(dl.ptr_sized_integer(), false))
                    }
                    ty::Dynamic(..) => {
                        let mut vtable = scalar_unit(Pointer);
                        vtable.valid_range = 1..=*vtable.valid_range.end();
                        vtable
                    }
                    _ => return Err(LayoutError::Unknown(unsized_part))
                };

                // Effectively a (ptr, meta) tuple.
                tcx.intern_layout(scalar_pair(data_ptr, metadata))
            }

            // Arrays and slices.
            ty::Array(element, mut count) => {
                if count.has_projections() {
                    count = tcx.normalize_erasing_regions(param_env, count);
                    if count.has_projections() {
                        return Err(LayoutError::Unknown(ty));
                    }
                }

                let element = self.layout_of(element)?;
                let count = count.unwrap_usize(tcx);
                let size = element.size.checked_mul(count, dl)
                    .ok_or(LayoutError::SizeOverflow(ty))?;
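                // `checked_mul` fails for totals at or beyond the target's
                // object-size bound, so enormous arrays (e.g. `[u64; 1 << 61]`
                // on a 64-bit target) report `SizeOverflow` rather than wrap.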

                tcx.intern_layout(LayoutDetails {
                    variants: Variants::Single { index: VariantIdx::new(0) },
                    fields: FieldPlacement::Array {
                        stride: element.size,
                        count
                    },
                    abi: Abi::Aggregate { sized: true },
                    align: element.align,
                    size
                })
            }
            ty::Slice(element) => {
                let element = self.layout_of(element)?;
                tcx.intern_layout(LayoutDetails {
                    variants: Variants::Single { index: VariantIdx::new(0) },
                    fields: FieldPlacement::Array {
                        stride: element.size,
                        count: 0
                    },
                    abi: Abi::Aggregate { sized: false },
                    align: element.align,
                    size: Size::ZERO
                })
            }
            ty::Str => {
                tcx.intern_layout(LayoutDetails {
                    variants: Variants::Single { index: VariantIdx::new(0) },
                    fields: FieldPlacement::Array {
                        stride: Size::from_bytes(1),
                        count: 0
                    },
                    abi: Abi::Aggregate { sized: false },
                    align: dl.i8_align,
                    size: Size::ZERO
                })
            }

            // Odd unit types.
            ty::FnDef(..) => {
                univariant(&[], &ReprOptions::default(), StructKind::AlwaysSized)?
            }
            ty::Dynamic(..) | ty::Foreign(..) => {
                let mut unit = univariant_uninterned(&[], &ReprOptions::default(),
                                                     StructKind::AlwaysSized)?;
                match unit.abi {
                    Abi::Aggregate { ref mut sized } => *sized = false,
                    _ => bug!()
                }
                tcx.intern_layout(unit)
            }

            // Tuples, generators and closures.
            ty::Generator(def_id, ref substs, _) => {
                let tys = substs.field_tys(def_id, tcx);
                univariant(&tys.map(|ty| self.layout_of(ty)).collect::<Result<Vec<_>, _>>()?,
                           &ReprOptions::default(),
                           StructKind::AlwaysSized)?
            }

            ty::Closure(def_id, ref substs) => {
                let tys = substs.upvar_tys(def_id, tcx);
                univariant(&tys.map(|ty| self.layout_of(ty)).collect::<Result<Vec<_>, _>>()?,
                           &ReprOptions::default(),
                           StructKind::AlwaysSized)?
            }

            ty::Tuple(tys) => {
                let kind = if tys.len() == 0 {
                    StructKind::AlwaysSized
                } else {
                    StructKind::MaybeUnsized
                };

                univariant(&tys.iter().map(|ty| self.layout_of(ty)).collect::<Result<Vec<_>, _>>()?,
                           &ReprOptions::default(), kind)?
            }

            // SIMD vector types.
            ty::Adt(def, ..) if def.repr.simd() => {
                let element = self.layout_of(ty.simd_type(tcx))?;
                let count = ty.simd_size(tcx) as u64;
                assert!(count > 0);
                let scalar = match element.abi {
                    Abi::Scalar(ref scalar) => scalar.clone(),
                    _ => {
                        tcx.sess.fatal(&format!("monomorphising SIMD type `{}` with \
                                                 a non-machine element type `{}`",
                                                ty, element.ty));
                    }
                };
                let size = element.size.checked_mul(count, dl)
                    .ok_or(LayoutError::SizeOverflow(ty))?;
                let align = dl.vector_align(size);
                let size = size.align_to(align.abi);

                tcx.intern_layout(LayoutDetails {
                    variants: Variants::Single { index: VariantIdx::new(0) },
                    fields: FieldPlacement::Array {
                        stride: element.size,
                        count
                    },
                    abi: Abi::Vector {
                        element: scalar,
                        count
                    },
                    size,
                    align,
                })
            }

            // ADTs.
            ty::Adt(def, substs) => {
                // Cache the field layouts.
                let variants = def.variants.iter().map(|v| {
                    v.fields.iter().map(|field| {
                        self.layout_of(field.ty(tcx, substs))
                    }).collect::<Result<Vec<_>, _>>()
                }).collect::<Result<IndexVec<VariantIdx, _>, _>>()?;

                if def.is_union() {
                    let packed = def.repr.packed();
                    if packed && def.repr.align > 0 {
                        bug!("union cannot be packed and aligned");
                    }

                    let pack = Align::from_bytes(def.repr.pack as u64).unwrap();

                    let mut align = if packed {
                        dl.aggregate_align.min(AbiAndPrefAlign::new(pack))
                    } else {
                        dl.aggregate_align
                    };

                    if def.repr.align > 0 {
                        let repr_align = def.repr.align as u64;
                        align = align.max(
                            AbiAndPrefAlign::new(Align::from_bytes(repr_align).unwrap()));
                    }

                    let optimize = !def.repr.inhibit_union_abi_opt();
                    let mut size = Size::ZERO;
                    let mut abi = Abi::Aggregate { sized: true };
                    let index = VariantIdx::new(0);
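                    // A union is as large as its largest field and as aligned
                    // as its most-aligned one; the loop below also tries to
                    // forward a single scalar ABI shared by all non-ZST fields.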
                    for field in &variants[index] {
                        assert!(!field.is_unsized());

                        let field_align = if packed {
                            field.align.min(AbiAndPrefAlign::new(pack))
                        } else {
                            field.align
                        };
                        align = align.max(field_align);

                        // If all non-ZST fields have the same ABI, forward this ABI
                        if optimize && !field.is_zst() {
                            // Normalize scalar_unit to the maximal valid range
                            let field_abi = match &field.abi {
                                Abi::Scalar(x) => Abi::Scalar(scalar_unit(x.value)),
                                Abi::ScalarPair(x, y) => {
                                    Abi::ScalarPair(
                                        scalar_unit(x.value),
                                        scalar_unit(y.value),
                                    )
                                }
                                Abi::Vector { element: x, count } => {
                                    Abi::Vector {
                                        element: scalar_unit(x.value),
                                        count: *count,
                                    }
                                }
                                Abi::Uninhabited |
                                Abi::Aggregate { .. } => Abi::Aggregate { sized: true },
                            };

                            if size == Size::ZERO {
                                // First non-ZST field: initialize `abi`.
                                abi = field_abi;
                            } else if abi != field_abi {
                                // Fields have different ABIs: reset to Aggregate.
                                abi = Abi::Aggregate { sized: true };
                            }
                        }

                        size = cmp::max(size, field.size);
                    }

                    return Ok(tcx.intern_layout(LayoutDetails {
                        variants: Variants::Single { index },
                        fields: FieldPlacement::Union(variants[index].len()),
                        abi,
                        align,
                        size: size.align_to(align.abi)
                    }));
                }

                // A variant is absent if it's uninhabited and only has ZST fields.
                // Present uninhabited variants only require space for their fields,
                // but *not* an encoding of the discriminant (e.g. a tag value).
                // See issue #49298 for more details on the need to leave space
                // for non-ZST uninhabited data (mostly partial initialization).
                let absent = |fields: &[TyLayout<'_>]| {
                    let uninhabited = fields.iter().any(|f| f.abi.is_uninhabited());
                    let is_zst = fields.iter().all(|f| f.is_zst());
                    uninhabited && is_zst
                };
                let (present_first, present_second) = {
                    let mut present_variants = variants.iter_enumerated().filter_map(|(i, v)| {
                        if absent(v) {
                            None
                        } else {
                            Some(i)
                        }
                    });
                    (present_variants.next(), present_variants.next())
                };
                if present_first.is_none() {
                    // Uninhabited because it has no variants, or only absent ones.
                    return tcx.layout_raw(param_env.and(tcx.types.never));
                }

                let is_struct = !def.is_enum() ||
                    // Only one variant is present.
                    (present_second.is_none() &&
                     // Representation optimizations are allowed.
                     !def.repr.inhibit_enum_layout_opt());
                if is_struct {
                    // Struct, or univariant enum equivalent to a struct.
                    // (Typechecking will reject discriminant-sizing attrs.)

                    let v = present_first.unwrap();
                    let kind = if def.is_enum() || variants[v].len() == 0 {
                        StructKind::AlwaysSized
                    } else {
                        let param_env = tcx.param_env(def.did);
                        let last_field = def.variants[v].fields.last().unwrap();
                        let always_sized = tcx.type_of(last_field.did)
                                              .is_sized(tcx.at(DUMMY_SP), param_env);
                        if !always_sized { StructKind::MaybeUnsized }
                        else { StructKind::AlwaysSized }
                    };

                    let mut st = univariant_uninterned(&variants[v], &def.repr, kind)?;
                    st.variants = Variants::Single { index: v };
                    let (start, end) = self.tcx.layout_scalar_valid_range(def.did);
                    match st.abi {
                        Abi::Scalar(ref mut scalar) |
                        Abi::ScalarPair(ref mut scalar, _) => {
                            // the asserts ensure that we are not using the
                            // `#[rustc_layout_scalar_valid_range(n)]`
                            // attribute to widen the range of anything as that would probably
                            // result in UB somewhere
                            if let Bound::Included(start) = start {
                                assert!(*scalar.valid_range.start() <= start);
                                scalar.valid_range = start..=*scalar.valid_range.end();
                            }
                            if let Bound::Included(end) = end {
                                assert!(*scalar.valid_range.end() >= end);
                                scalar.valid_range = *scalar.valid_range.start()..=end;
                            }
                        }
                        _ => assert!(
                            start == Bound::Unbounded && end == Bound::Unbounded,
                            "nonscalar layout for layout_scalar_valid_range type {:?}: {:#?}",
                            ty,
                            st,
                        ),
                    }
                    return Ok(tcx.intern_layout(st));
                }

                // The current code for niche-filling relies on variant indices
                // instead of actual discriminants, so dataful enums with
                // explicit discriminants (RFC #2363) would misbehave.
                let no_explicit_discriminants = def.variants.iter_enumerated()
                    .all(|(i, v)| v.discr == ty::VariantDiscr::Relative(i.as_u32()));
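                // e.g. `enum E { A, B = 5, C }` uses an explicit discriminant,
                // so it is excluded from niche-filling.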

                // Niche-filling enum optimization.
                if !def.repr.inhibit_enum_layout_opt() && no_explicit_discriminants {
                    let mut dataful_variant = None;
                    let mut niche_variants = VariantIdx::MAX..=VariantIdx::new(0);

                    // Find one non-ZST variant.
                    'variants: for (v, fields) in variants.iter_enumerated() {
                        if absent(fields) {
                            continue 'variants;
                        }
                        for f in fields {
                            if !f.is_zst() {
                                if dataful_variant.is_none() {
                                    dataful_variant = Some(v);
                                    continue 'variants;
                                } else {
                                    dataful_variant = None;
                                    break 'variants;
                                }
                            }
                        }
                        niche_variants = *niche_variants.start().min(&v)..=v;
                    }

                    if niche_variants.start() > niche_variants.end() {
                        dataful_variant = None;
                    }

                    if let Some(i) = dataful_variant {
                        let count = (
                            niche_variants.end().as_u32() - niche_variants.start().as_u32() + 1
                        ) as u128;
                        for (field_index, &field) in variants[i].iter().enumerate() {
                            let niche = match self.find_niche(field)? {
                                Some(niche) => niche,
                                None => continue,
                            };
                            let (niche_start, niche_scalar) = match niche.reserve(self, count) {
                                Some(pair) => pair,
                                None => continue,
                            };

                            let mut align = dl.aggregate_align;
                            let st = variants.iter_enumerated().map(|(j, v)| {
                                let mut st = univariant_uninterned(v,
                                    &def.repr, StructKind::AlwaysSized)?;
                                st.variants = Variants::Single { index: j };

                                align = align.max(st.align);

                                Ok(st)
                            }).collect::<Result<IndexVec<VariantIdx, _>, _>>()?;

                            let offset = st[i].fields.offset(field_index) + niche.offset;
                            let size = st[i].size;

                            let mut abi = match st[i].abi {
                                Abi::Scalar(_) => Abi::Scalar(niche_scalar.clone()),
                                Abi::ScalarPair(ref first, ref second) => {
                                    // We need to use scalar_unit to reset the
                                    // valid range to the maximal one for that
                                    // primitive, because only the niche is
                                    // guaranteed to be initialised, not the
                                    // other primitive.
                                    if offset.bytes() == 0 {
                                        Abi::ScalarPair(
                                            niche_scalar.clone(),
                                            scalar_unit(second.value),
                                        )
                                    } else {
                                        Abi::ScalarPair(
                                            scalar_unit(first.value),
                                            niche_scalar.clone(),
                                        )
                                    }
                                }
                                _ => Abi::Aggregate { sized: true },
                            };

                            if st.iter().all(|v| v.abi.is_uninhabited()) {
                                abi = Abi::Uninhabited;
                            }

                            return Ok(tcx.intern_layout(LayoutDetails {
                                variants: Variants::NicheFilling {
                                    dataful_variant: i,
                                    niche_variants: niche_variants.clone(),
                                    niche: niche_scalar.clone(),
                                    niche_start,
                                    variants: st,
                                },
                                fields: FieldPlacement::Arbitrary {
                                    offsets: vec![offset],
                                    memory_index: vec![0]
                                },
                                abi,
                                size,
                                align,
                            }));
                        }
                    }
                }

                let (mut min, mut max) = (i128::max_value(), i128::min_value());
                let discr_type = def.repr.discr_type();
                let bits = Integer::from_attr(self, discr_type).size().bits();
                for (i, discr) in def.discriminants(tcx) {
                    if variants[i].iter().any(|f| f.abi.is_uninhabited()) {
                        continue;
                    }
                    let mut x = discr.val as i128;
                    if discr_type.is_signed() {
                        // sign extend the raw representation to be an i128
                        x = (x << (128 - bits)) >> (128 - bits);
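                        // e.g. with an i8 repr (`bits == 8`), the raw value
                        // 0xFF becomes -1 after shifting left and then
                        // (arithmetically) right by 120 bits.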
                    }
                    if x < min { min = x; }
                    if x > max { max = x; }
                }
                // We might have no inhabited variants, so pretend there's at least one.
                if (min, max) == (i128::max_value(), i128::min_value()) {
                    min = 0;
                    max = 0;
                }
                assert!(min <= max, "discriminant range is {}...{}", min, max);
                let (min_ity, signed) = Integer::repr_discr(tcx, ty, &def.repr, min, max);

                let mut align = dl.aggregate_align;
                let mut size = Size::ZERO;

                // We're interested in the smallest alignment, so start large.
                let mut start_align = Align::from_bytes(256).unwrap();
                assert_eq!(Integer::for_align(dl, start_align), None);

                // repr(C) on an enum tells us to make a (tag, union) layout,
                // so we need to grow the prefix alignment to be at least
                // the alignment of the union. (This value is used both for
                // determining the alignment of the overall enum, and the
                // determining the alignment of the payload after the tag.)
                let mut prefix_align = min_ity.align(dl).abi;
                if def.repr.c() {
                    for fields in &variants {
                        for field in fields {
                            prefix_align = prefix_align.max(field.align.abi);
                        }
                    }
                }

                // Create the set of structs that represent each variant.
                let mut layout_variants = variants.iter_enumerated().map(|(i, field_layouts)| {
                    let mut st = univariant_uninterned(&field_layouts,
                        &def.repr, StructKind::Prefixed(min_ity.size(), prefix_align))?;
                    st.variants = Variants::Single { index: i };
                    // Find the first field we can't move later
                    // to make room for a larger discriminant.
                    for field in st.fields.index_by_increasing_offset().map(|j| field_layouts[j]) {
                        if !field.is_zst() || field.align.abi.bytes() != 1 {
                            start_align = start_align.min(field.align.abi);
                            break;
                        }
                    }
                    size = cmp::max(size, st.size);
                    align = align.max(st.align);
                    Ok(st)
                }).collect::<Result<IndexVec<VariantIdx, _>, _>>()?;

                // Align the maximum variant size to the largest alignment.
                size = size.align_to(align.abi);

                if size.bytes() >= dl.obj_size_bound() {
                    return Err(LayoutError::SizeOverflow(ty));
                }

                let typeck_ity = Integer::from_attr(dl, def.repr.discr_type());
                if typeck_ity < min_ity {
                    // It is a bug if Layout decided on a greater discriminant size than typeck for
                    // some reason at this point (based on values discriminant can take on). Mostly
                    // because this discriminant will be loaded, and then stored into variable of
                    // type calculated by typeck. Consider such case (a bug): typeck decided on
                    // byte-sized discriminant, but layout thinks we need a 16-bit to store all
                    // discriminant values. That would be a bug, because then, in codegen, in order
                    // to store this 16-bit discriminant into 8-bit sized temporary some of the
                    // space necessary to represent would have to be discarded (or layout is wrong
                    // on thinking it needs 16 bits)
                    bug!("layout decided on a larger discriminant type ({:?}) than typeck ({:?})",
                         min_ity, typeck_ity);
                    // However, it is fine to make discr type however large (as an optimisation)
                    // after this point – we’ll just truncate the value we load in codegen.
                }

                // Check to see if we should use a different type for the
                // discriminant. We can safely use a type with the same size
                // as the alignment of the first field of each variant.
                // We increase the size of the discriminant to avoid LLVM copying
                // padding when it doesn't need to. This normally causes unaligned
                // load/stores and excessive memcpy/memset operations. By using a
                // bigger integer size, LLVM can be sure about its contents and
                // won't be so conservative.

                // Use the initial field alignment
                let mut ity = if def.repr.c() || def.repr.int.is_some() {
                    min_ity
                } else {
                    Integer::for_align(dl, start_align).unwrap_or(min_ity)
                };

                // If the alignment is not larger than the chosen discriminant size,
                // don't use the alignment as the final size.
                if ity <= min_ity {
                    ity = min_ity;
                } else {
                    // Patch up the variants' first few fields.
                    let old_ity_size = min_ity.size();
                    let new_ity_size = ity.size();
                    for variant in &mut layout_variants {
                        match variant.fields {
                            FieldPlacement::Arbitrary { ref mut offsets, .. } => {
                                for i in offsets {
                                    if *i <= old_ity_size {
                                        assert_eq!(*i, old_ity_size);
                                        *i = new_ity_size;
                                    }
                                }
                                // We might be making the struct larger.
                                if variant.size <= old_ity_size {
                                    variant.size = new_ity_size;
                                }
                            }
                            _ => bug!()
                        }
                    }
                }

                let tag_mask = !0u128 >> (128 - ity.size().bits());
                let tag = Scalar {
                    value: Int(ity, signed),
                    valid_range: (min as u128 & tag_mask)..=(max as u128 & tag_mask),
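                    // Note that a masked `min > max` yields a wrapping range:
                    // e.g. an i8 range of -1..=0 becomes 0xFF..=0x00.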
                };
                let mut abi = Abi::Aggregate { sized: true };
                if tag.value.size(dl) == size {
                    abi = Abi::Scalar(tag.clone());
                } else {
                    // Try to use a ScalarPair for all tagged enums.
                    let mut common_prim = None;
                    for (field_layouts, layout_variant) in variants.iter().zip(&layout_variants) {
                        let offsets = match layout_variant.fields {
                            FieldPlacement::Arbitrary { ref offsets, .. } => offsets,
                            _ => bug!(),
                        };
                        let mut fields = field_layouts
                            .iter()
                            .zip(offsets)
                            .filter(|p| !p.0.is_zst());
                        let (field, offset) = match (fields.next(), fields.next()) {
                            (None, None) => continue,
                            (Some(pair), None) => pair,
                            _ => {
                                common_prim = None;
                                break;
                            }
                        };
                        let prim = match field.details.abi {
                            Abi::Scalar(ref scalar) => scalar.value,
                            _ => {
                                common_prim = None;
                                break;
                            }
                        };
                        if let Some(pair) = common_prim {
                            // This is pretty conservative. We could go fancier
                            // by conflating things like i32 and u32, or even
                            // realising that (u8, u8) could just cohabit with
                            // u16 or even u32.
                            if pair != (prim, offset) {
                                common_prim = None;
                                break;
                            }
                        } else {
                            common_prim = Some((prim, offset));
                        }
                    }
                    if let Some((prim, offset)) = common_prim {
                        let pair = scalar_pair(tag.clone(), scalar_unit(prim));
                        let pair_offsets = match pair.fields {
                            FieldPlacement::Arbitrary {
                                ref offsets,
                                ref memory_index
                            } => {
                                assert_eq!(memory_index, &[0, 1]);
                                offsets
                            }
                            _ => bug!()
                        };
                        if pair_offsets[0] == Size::ZERO &&
                           pair_offsets[1] == *offset &&
                           align == pair.align &&
                           size == pair.size {
                            // We can use `ScalarPair` only when it matches our
                            // already computed layout (including `#[repr(C)]`).
                            abi = pair.abi;
                        }
                    }
                }

                if layout_variants.iter().all(|v| v.abi.is_uninhabited()) {
                    abi = Abi::Uninhabited;
                }

                tcx.intern_layout(LayoutDetails {
                    variants: Variants::Tagged {
                        tag,
                        variants: layout_variants,
                    },
                    fields: FieldPlacement::Arbitrary {
                        offsets: vec![Size::ZERO],
                        memory_index: vec![0]
                    },
                    abi,
                    align,
                    size
                })
            }

            // Types with no meaningful known layout.
            ty::Projection(_) | ty::Opaque(..) => {
                let normalized = tcx.normalize_erasing_regions(param_env, ty);
                if ty == normalized {
                    return Err(LayoutError::Unknown(ty));
                }
                tcx.layout_raw(param_env.and(normalized))?
            }

            ty::Bound(..) |
            ty::Placeholder(..) |
            ty::UnnormalizedProjection(..) |
            ty::GeneratorWitness(..) |
            ty::Infer(_) => {
                bug!("LayoutDetails::compute: unexpected type `{}`", ty)
            }

            ty::Param(_) | ty::Error => {
                return Err(LayoutError::Unknown(ty));
            }
        })
    }

    /// This is invoked by the `layout_raw` query to record the final
    /// layout of each type.
    #[inline]
    fn record_layout_for_printing(&self, layout: TyLayout<'tcx>) {
        // If we are running with `-Zprint-type-sizes`, record layouts for
        // dumping later. Ignore layouts that are done with non-empty
        // environments or non-monomorphic layouts, as the user only wants
        // to see the stuff resulting from the final codegen session.
        if
            !self.tcx.sess.opts.debugging_opts.print_type_sizes ||
            layout.ty.has_param_types() ||
            layout.ty.has_self_ty() ||
            !self.param_env.caller_bounds.is_empty()
        {
            return;
        }

        self.record_layout_for_printing_outlined(layout)
    }

    fn record_layout_for_printing_outlined(&self, layout: TyLayout<'tcx>) {
        // (delay format until we actually need it)
        let record = |kind, packed, opt_discr_size, variants| {
            let type_desc = format!("{:?}", layout.ty);
            self.tcx.sess.code_stats.borrow_mut().record_type_size(kind,
                                                                   type_desc,
                                                                   layout.align.abi.bytes(),
                                                                   layout.size.bytes(),
                                                                   packed,
                                                                   opt_discr_size,
                                                                   variants);
        };

        let adt_def = match layout.ty.sty {
            ty::Adt(ref adt_def, _) => {
                debug!("print-type-size t: `{:?}` process adt", layout.ty);
                adt_def
            }

            ty::Closure(..) => {
                debug!("print-type-size t: `{:?}` record closure", layout.ty);
                record(DataTypeKind::Closure, false, None, vec![]);
                return;
            }

            _ => {
                debug!("print-type-size t: `{:?}` skip non-nominal", layout.ty);
                return;
            }
        };

        let adt_kind = adt_def.adt_kind();
        let adt_packed = adt_def.repr.packed();

        let build_variant_info = |n: Option<ast::Name>,
                                  flds: &[ast::Name],
                                  layout: TyLayout<'tcx>| {
            let mut min_size = Size::ZERO;
            let field_info: Vec<_> = flds.iter().enumerate().map(|(i, &name)| {
                match layout.field(self, i) {
                    Err(err) => {
                        bug!("no layout found for field {}: `{:?}`", name, err);
                    }
                    Ok(field_layout) => {
                        let offset = layout.fields.offset(i);
                        let field_end = offset + field_layout.size;
                        if min_size < field_end {
                            min_size = field_end;
                        }
                        session::FieldInfo {
                            name: name.to_string(),
                            offset: offset.bytes(),
                            size: field_layout.size.bytes(),
                            align: field_layout.align.abi.bytes(),
                        }
                    }
                }
            }).collect();

            session::VariantInfo {
                name: n.map(|n| n.to_string()),
                kind: if layout.is_unsized() {
                    session::SizeKind::Min
                } else {
                    session::SizeKind::Exact
                },
                align: layout.align.abi.bytes(),
                size: if min_size.bytes() == 0 {
                    layout.size.bytes()
                } else {
                    min_size.bytes()
                },
                fields: field_info,
            }
        };

        match layout.variants {
            Variants::Single { index } => {
                debug!("print-type-size `{:#?}` variant {}",
                       layout, adt_def.variants[index].name);
                if !adt_def.variants.is_empty() {
                    let variant_def = &adt_def.variants[index];
                    let fields: Vec<_> =
                        variant_def.fields.iter().map(|f| f.ident.name).collect();
                    record(adt_kind.into(),
                           adt_packed,
                           None,
                           vec![build_variant_info(Some(variant_def.name),
                                                   &fields,
                                                   layout)]);
                } else {
                    // (This case arises for *empty* enums; so give it
                    // zero variants.)
                    record(adt_kind.into(), adt_packed, None, vec![]);
                }
            }

            Variants::NicheFilling { .. } |
            Variants::Tagged { .. } => {
                debug!("print-type-size `{:#?}` adt general variants def {}",
                       layout.ty, adt_def.variants.len());
                let variant_infos: Vec<_> =
                    adt_def.variants.iter_enumerated().map(|(i, variant_def)| {
                        let fields: Vec<_> =
                            variant_def.fields.iter().map(|f| f.ident.name).collect();
                        build_variant_info(Some(variant_def.name),
                                           &fields,
                                           layout.for_variant(self, i))
                    }).collect();
                record(adt_kind.into(), adt_packed, match layout.variants {
                    Variants::Tagged { ref tag, .. } => Some(tag.value.size(self)),
                    _ => None
                }, variant_infos);
            }
        }
    }
}

/// Type size "skeleton", i.e. the only information determining a type's size.
/// While this is conservative (aside from constant sizes, only pointers,
/// newtypes thereof, and null-pointer-optimized enums are allowed), it is
/// enough to statically check common use cases of transmute.
#[derive(Copy, Clone, Debug)]
pub enum SizeSkeleton<'tcx> {
    /// Any statically computable Layout.
    Known(Size),

    /// A potentially-fat pointer.
    Pointer {
        /// If true, this pointer is never null.
        non_zero: bool,
        /// The type which determines the unsized metadata, if any,
        /// of this pointer. Either a type parameter or a projection
        /// depending on one, with regions erased.
        tail: Ty<'tcx>
    }
}

impl<'a, 'tcx> SizeSkeleton<'tcx> {
    pub fn compute(ty: Ty<'tcx>,
                   tcx: TyCtxt<'a, 'tcx, 'tcx>,
                   param_env: ty::ParamEnv<'tcx>)
                   -> Result<SizeSkeleton<'tcx>, LayoutError<'tcx>> {
        debug_assert!(!ty.has_infer_types());

        // First try computing a static layout.
        let err = match tcx.layout_of(param_env.and(ty)) {
            Ok(layout) => {
                return Ok(SizeSkeleton::Known(layout.size));
            }
            Err(err) => err
        };

        match ty.sty {
            ty::Ref(_, pointee, _) |
            ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
                let non_zero = !ty.is_unsafe_ptr();
                let tail = tcx.struct_tail(pointee);
                match tail.sty {
                    ty::Param(_) | ty::Projection(_) => {
                        debug_assert!(tail.has_param_types() || tail.has_self_ty());
                        Ok(SizeSkeleton::Pointer {
                            non_zero,
                            tail: tcx.erase_regions(&tail)
                        })
                    }
                    _ => {
                        bug!("SizeSkeleton::compute({}): layout errored ({}), yet \
                              tail `{}` is not a type parameter or a projection",
                             ty, err, tail)
                    }
                }
            }

            ty::Adt(def, substs) => {
                // Only newtypes and enums w/ nullable pointer optimization.
                if def.is_union() || def.variants.is_empty() || def.variants.len() > 2 {
                    return Err(err);
                }

                // Get a zero-sized variant or a pointer newtype.
                let zero_or_ptr_variant = |i| {
                    let i = VariantIdx::new(i);
                    let fields = def.variants[i].fields.iter().map(|field| {
                        SizeSkeleton::compute(field.ty(tcx, substs), tcx, param_env)
                    });
                    let mut ptr = None;
                    for field in fields {
                        let field = field?;
                        match field {
                            SizeSkeleton::Known(size) => {
                                if size.bytes() > 0 {
                                    return Err(err);
                                }
                            }
                            SizeSkeleton::Pointer {..} => {
                                if ptr.is_some() {
                                    return Err(err);
                                }
                                ptr = Some(field);
                            }
                        }
                    }
                    Ok(ptr)
                };

                let v0 = zero_or_ptr_variant(0)?;
                // Newtype.
                if def.variants.len() == 1 {
                    if let Some(SizeSkeleton::Pointer { non_zero, tail }) = v0 {
                        return Ok(SizeSkeleton::Pointer {
                            non_zero: non_zero || match tcx.layout_scalar_valid_range(def.did) {
                                (Bound::Included(start), Bound::Unbounded) => start > 0,
                                (Bound::Included(start), Bound::Included(end)) =>
                                    0 < start && start < end,
                                _ => false,
                            },
                            tail,
                        });
                    } else {
                        return Err(err);
                    }
                }

                let v1 = zero_or_ptr_variant(1)?;
                // Nullable pointer enum optimization.
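                // e.g. `Option<&T>`: one variant is the zero-sized `None` and
                // the other holds a non-null pointer, so the whole enum can
                // be exactly pointer-sized.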
                match (v0, v1) {
                    (Some(SizeSkeleton::Pointer { non_zero: true, tail }), None) |
                    (None, Some(SizeSkeleton::Pointer { non_zero: true, tail })) => {
                        Ok(SizeSkeleton::Pointer {
                            non_zero: false,
                            tail,
                        })
                    }
                    _ => Err(err)
                }
            }

            ty::Projection(_) | ty::Opaque(..) => {
                let normalized = tcx.normalize_erasing_regions(param_env, ty);
                if ty == normalized {
                    Err(err)
                } else {
                    SizeSkeleton::compute(normalized, tcx, param_env)
                }
            }

            _ => Err(err)
        }
    }

    pub fn same_size(self, other: SizeSkeleton<'_>) -> bool {
        match (self, other) {
            (SizeSkeleton::Known(a), SizeSkeleton::Known(b)) => a == b,
            (SizeSkeleton::Pointer { tail: a, .. },
             SizeSkeleton::Pointer { tail: b, .. }) => a == b,
            _ => false
        }
    }
}

pub trait HasTyCtxt<'tcx>: HasDataLayout {
    fn tcx<'a>(&'a self) -> TyCtxt<'a, 'tcx, 'tcx>;
}

impl<'a, 'gcx, 'tcx> HasDataLayout for TyCtxt<'a, 'gcx, 'tcx> {
    fn data_layout(&self) -> &TargetDataLayout {
        &self.data_layout
    }
}

impl<'a, 'gcx, 'tcx> HasTyCtxt<'gcx> for TyCtxt<'a, 'gcx, 'tcx> {
    fn tcx<'b>(&'b self) -> TyCtxt<'b, 'gcx, 'gcx> {
        self.global_tcx()
    }
}

impl<'tcx, T: HasDataLayout> HasDataLayout for LayoutCx<'tcx, T> {
    fn data_layout(&self) -> &TargetDataLayout {
        self.tcx.data_layout()
    }
}

impl<'gcx, 'tcx, T: HasTyCtxt<'gcx>> HasTyCtxt<'gcx> for LayoutCx<'tcx, T> {
    fn tcx<'b>(&'b self) -> TyCtxt<'b, 'gcx, 'gcx> {
        self.tcx.tcx()
    }
}

pub trait MaybeResult<T> {
    fn from_ok(x: T) -> Self;
    fn map_same<F: FnOnce(T) -> T>(self, f: F) -> Self;
}

impl<T> MaybeResult<T> for T {
    fn from_ok(x: T) -> Self {
        x
    }
    fn map_same<F: FnOnce(T) -> T>(self, f: F) -> Self {
        f(self)
    }
}

impl<T, E> MaybeResult<T> for Result<T, E> {
    fn from_ok(x: T) -> Self {
        Ok(x)
    }
    fn map_same<F: FnOnce(T) -> T>(self, f: F) -> Self {
        self.map(f)
    }
}

pub type TyLayout<'tcx> = ::rustc_target::abi::TyLayout<'tcx, Ty<'tcx>>;

impl<'a, 'tcx> LayoutOf for LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> {
    type Ty = Ty<'tcx>;
    type TyLayout = Result<TyLayout<'tcx>, LayoutError<'tcx>>;

    /// Computes the layout of a type. Note that this implicitly
    /// executes in "reveal all" mode.
    fn layout_of(&self, ty: Ty<'tcx>) -> Self::TyLayout {
        let param_env = self.param_env.with_reveal_all();
        let ty = self.tcx.normalize_erasing_regions(param_env, ty);
        let details = self.tcx.layout_raw(param_env.and(ty))?;
        let layout = TyLayout {
            ty,
            details
        };

        // NB: This recording is normally disabled; when enabled, it
        // can however trigger recursive invocations of `layout_of`.
        // Therefore, we execute it *after* the main query has
        // completed, to avoid problems around recursive structures
        // and the like. (Admittedly, I wasn't able to reproduce a problem
        // here, but it seems like the right thing to do. -nmatsakis)
        self.record_layout_for_printing(layout);

        Ok(layout)
    }
}

impl<'a, 'tcx> LayoutOf for LayoutCx<'tcx, ty::query::TyCtxtAt<'a, 'tcx, 'tcx>> {
    type Ty = Ty<'tcx>;
    type TyLayout = Result<TyLayout<'tcx>, LayoutError<'tcx>>;

    /// Computes the layout of a type. Note that this implicitly
    /// executes in "reveal all" mode.
    fn layout_of(&self, ty: Ty<'tcx>) -> Self::TyLayout {
        let param_env = self.param_env.with_reveal_all();
        let ty = self.tcx.normalize_erasing_regions(param_env, ty);
        let details = self.tcx.layout_raw(param_env.and(ty))?;
        let layout = TyLayout {
            ty,
            details
        };

        // NB: This recording is normally disabled; when enabled, it
        // can however trigger recursive invocations of `layout_of`.
        // Therefore, we execute it *after* the main query has
        // completed, to avoid problems around recursive structures
        // and the like. (Admittedly, I wasn't able to reproduce a problem
        // here, but it seems like the right thing to do. -nmatsakis)
        let cx = LayoutCx {
            tcx: *self.tcx,
            param_env: self.param_env
        };
        cx.record_layout_for_printing(layout);

        Ok(layout)
    }
}

// Helper (inherent) `layout_of` methods to avoid pushing `LayoutCx` to users.
impl TyCtxt<'a, 'tcx, '_> {
    /// Computes the layout of a type. Note that this implicitly
    /// executes in "reveal all" mode.
    #[inline]
    pub fn layout_of(self, param_env_and_ty: ty::ParamEnvAnd<'tcx, Ty<'tcx>>)
                     -> Result<TyLayout<'tcx>, LayoutError<'tcx>> {
        let cx = LayoutCx {
            tcx: self.global_tcx(),
            param_env: param_env_and_ty.param_env
        };
        cx.layout_of(param_env_and_ty.value)
    }
}

impl ty::query::TyCtxtAt<'a, 'tcx, '_> {
    /// Computes the layout of a type. Note that this implicitly
    /// executes in "reveal all" mode.
    #[inline]
    pub fn layout_of(self, param_env_and_ty: ty::ParamEnvAnd<'tcx, Ty<'tcx>>)
                     -> Result<TyLayout<'tcx>, LayoutError<'tcx>> {
        let cx = LayoutCx {
            tcx: self.global_tcx().at(self.span),
            param_env: param_env_and_ty.param_env
        };
        cx.layout_of(param_env_and_ty.value)
    }
}

impl<'a, 'tcx, C> TyLayoutMethods<'tcx, C> for Ty<'tcx>
    where C: LayoutOf<Ty = Ty<'tcx>> + HasTyCtxt<'tcx>,
          C::TyLayout: MaybeResult<TyLayout<'tcx>>
{
    fn for_variant(this: TyLayout<'tcx>, cx: &C, variant_index: VariantIdx) -> TyLayout<'tcx> {
        let details = match this.variants {
            Variants::Single { index } if index == variant_index => this.details,

            Variants::Single { index } => {
                // Deny calling for_variant more than once for non-Single enums.
                cx.layout_of(this.ty).map_same(|layout| {
                    assert_eq!(layout.variants, Variants::Single { index });
                    layout
                });

                let fields = match this.ty.sty {
                    ty::Adt(def, _) => def.variants[variant_index].fields.len(),
                    _ => bug!()
                };
                let tcx = cx.tcx();
                tcx.intern_layout(LayoutDetails {
                    variants: Variants::Single { index: variant_index },
                    fields: FieldPlacement::Union(fields),
                    abi: Abi::Uninhabited,
                    align: tcx.data_layout.i8_align,
                    size: Size::ZERO
                })
            }

            Variants::NicheFilling { ref variants, .. } |
            Variants::Tagged { ref variants, .. } => {
                &variants[variant_index]
            }
        };

        assert_eq!(details.variants, Variants::Single { index: variant_index });

        TyLayout {
            ty: this.ty,
            details
        }
    }

    fn field(this: TyLayout<'tcx>, cx: &C, i: usize) -> C::TyLayout {
        let tcx = cx.tcx();
        cx.layout_of(match this.ty.sty {
            ty::Bool |
            ty::Char |
            ty::Int(_) |
            ty::Uint(_) |
            ty::Float(_) |
            ty::FnPtr(_) |
            ty::Never |
            ty::FnDef(..) |
            ty::GeneratorWitness(..) |
            ty::Foreign(..) |
            ty::Dynamic(..) => {
                bug!("TyLayout::field_type({:?}): not applicable", this)
            }

            // Potentially-fat pointers.
            ty::Ref(_, pointee, _) |
            ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
                assert!(i < this.fields.count());

                // Reuse the fat *T type as its own thin pointer data field.
                // This provides information about e.g. DST struct pointees
                // (which may have no non-DST form), and will work as long
                // as the `Abi` or `FieldPlacement` is checked by users.
                if i == 0 {
                    let nil = tcx.mk_unit();
                    let ptr_ty = if this.ty.is_unsafe_ptr() {
                        tcx.mk_mut_ptr(nil)
                    } else {
                        tcx.mk_mut_ref(tcx.types.re_static, nil)
                    };
                    return cx.layout_of(ptr_ty).map_same(|mut ptr_layout| {
                        ptr_layout.ty = this.ty;
                        ptr_layout
                    });
                }

                match tcx.struct_tail(pointee).sty {
                    ty::Slice(_) |
                    ty::Str => tcx.types.usize,
                    ty::Dynamic(_, _) => {
                        tcx.mk_imm_ref(
                            tcx.types.re_static,
                            tcx.mk_array(tcx.types.usize, 3),
                        )
                        /* FIXME use actual fn pointers
                        Warning: naively computing the number of entries in the
                        vtable by counting the methods on the trait + methods on
                        all parent traits does not work, because some methods can
                        be not object safe and thus excluded from the vtable.
                        Increase this counter if you tried to implement this but
                        failed to do it without duplicating a lot of code from
                        other places in the compiler: 2
                        tcx.mk_tup(&[
                            tcx.mk_array(tcx.types.usize, 3),
                            tcx.mk_array(Option<fn()>),
                        ])
                        */
                    }
                    _ => bug!("TyLayout::field_type({:?}): not applicable", this)
                }
            }

            // Arrays and slices.
            ty::Array(element, _) |
            ty::Slice(element) => element,
            ty::Str => tcx.types.u8,

            // Tuples, generators and closures.
            ty::Closure(def_id, ref substs) => {
                substs.upvar_tys(def_id, tcx).nth(i).unwrap()
            }

            ty::Generator(def_id, ref substs, _) => {
                substs.field_tys(def_id, tcx).nth(i).unwrap()
            }

            ty::Tuple(tys) => tys[i],

            // SIMD vector types.
            ty::Adt(def, ..) if def.repr.simd() => {
                this.ty.simd_type(tcx)
            }

            // ADTs.
            ty::Adt(def, substs) => {
                match this.variants {
                    Variants::Single { index } => {
                        def.variants[index].fields[i].ty(tcx, substs)
                    }

                    // Discriminant field for enums (where applicable).
                    Variants::Tagged { tag: ref discr, .. } |
                    Variants::NicheFilling { niche: ref discr, .. } => {
                        assert_eq!(i, 0);
                        let layout = LayoutDetails::scalar(cx, discr.clone());
                        return MaybeResult::from_ok(TyLayout {
                            details: tcx.intern_layout(layout),
                            ty: discr.value.to_ty(tcx)
                        });
                    }
                }
            }

            ty::Projection(_) | ty::UnnormalizedProjection(..) | ty::Bound(..) |
            ty::Placeholder(..) | ty::Opaque(..) | ty::Param(_) | ty::Infer(_) |
            ty::Error => {
                bug!("TyLayout::field_type: unexpected type `{}`", this.ty)
            }
        })
    }
}

struct Niche {
    offset: Size,
    scalar: Scalar,
    available: u128,
}

impl Niche {
    fn reserve<'a, 'tcx>(
        &self,
        cx: &LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>>,
        count: u128,
    ) -> Option<(u128, Scalar)> {
        if count > self.available {
            return None;
        }
        let Scalar { value, valid_range: ref v } = self.scalar;
        let bits = value.size(cx).bits();
        assert!(bits <= 128);
        let max_value = !0u128 >> (128 - bits);
        let start = v.end().wrapping_add(1) & max_value;
        let end = v.end().wrapping_add(count) & max_value;
        Some((start, Scalar { value, valid_range: *v.start()..=end }))
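        // For example, reserving `count == 3` values in a `bool` niche
        // (valid range 0..=1 over 8 bits) yields `start == 2` and widens
        // the valid range to 0..=4.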
    }
}

impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> {
    /// Find the offset of a niche leaf field, starting from
    /// the given type and recursing through aggregates.
    // FIXME(eddyb) traverse already optimized enums.
    fn find_niche(&self, layout: TyLayout<'tcx>) -> Result<Option<Niche>, LayoutError<'tcx>> {
        let scalar_niche = |scalar: &Scalar, offset| {
            let Scalar { value, valid_range: ref v } = *scalar;

            let bits = value.size(self).bits();
            assert!(bits <= 128);
            let max_value = !0u128 >> (128 - bits);

            // Find out how many values are outside the valid range.
            let available = if v.start() <= v.end() {
                v.start() + (max_value - v.end())
            } else {
                v.start() - v.end() - 1
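            // e.g. `bool` (valid range 0..=1 over 8 bits) leaves 254 values
            // available for niches.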
            };

            // Give up if there is no niche value available.
            if available == 0 {
                return None;
            }

            Some(Niche { offset, scalar: scalar.clone(), available })
        };

        // Local variables which live across yields are stored
        // in the generator type as fields. These may be uninitialized
        // so we don't look for niches there.
        if let ty::Generator(..) = layout.ty.sty {
            return Ok(None);
        }

        match layout.abi {
            Abi::Scalar(ref scalar) => {
                return Ok(scalar_niche(scalar, Size::ZERO));
            }
            Abi::ScalarPair(ref a, ref b) => {
                // HACK(nox): We iter on `b` and then `a` because `max_by_key`
                // returns the last maximum.
                let niche = iter::once(
                    (b, a.value.size(self).align_to(b.value.align(self).abi))
                )
                    .chain(iter::once((a, Size::ZERO)))
                    .filter_map(|(scalar, offset)| scalar_niche(scalar, offset))
                    .max_by_key(|niche| niche.available);
                return Ok(niche);
            }
            Abi::Vector { ref element, .. } => {
                return Ok(scalar_niche(element, Size::ZERO));
            }
            _ => {}
        }

        // Perhaps one of the fields is non-zero, let's recurse and find out.
        if let FieldPlacement::Union(_) = layout.fields {
            // Only Rust enums have safe-to-inspect fields
            // (a discriminant), other unions are unsafe.
            if let Variants::Single { .. } = layout.variants {
                return Ok(None);
            }
        }
        if let FieldPlacement::Array { .. } = layout.fields {
            if layout.fields.count() > 0 {
                return self.find_niche(layout.field(self, 0)?);
            } else {
                return Ok(None);
            }
        }

        let mut niche = None;
        let mut available = 0;
        for i in 0..layout.fields.count() {
            if let Some(mut c) = self.find_niche(layout.field(self, i)?)? {
                if c.available > available {
                    available = c.available;
                    c.offset += layout.fields.offset(i);
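                    // Accumulate the enclosing field's offset so the niche
                    // offset is relative to the start of `layout` itself.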
                    niche = Some(c);
                }
            }
        }
        Ok(niche)
    }
}

impl<'a> HashStable<StableHashingContext<'a>> for Variants {
    fn hash_stable<W: StableHasherResult>(&self,
                                          hcx: &mut StableHashingContext<'a>,
                                          hasher: &mut StableHasher<W>) {
        use ty::layout::Variants::*;
        mem::discriminant(self).hash_stable(hcx, hasher);

        match *self {
            Single { index } => {
                index.hash_stable(hcx, hasher);
            }
            Tagged {
                ref tag,
                ref variants,
            } => {
                tag.hash_stable(hcx, hasher);
                variants.hash_stable(hcx, hasher);
            }
            NicheFilling {
                dataful_variant,
                ref niche_variants,
                ref niche,
                niche_start,
                ref variants,
            } => {
                dataful_variant.hash_stable(hcx, hasher);
                niche_variants.start().hash_stable(hcx, hasher);
                niche_variants.end().hash_stable(hcx, hasher);
                niche.hash_stable(hcx, hasher);
                niche_start.hash_stable(hcx, hasher);
                variants.hash_stable(hcx, hasher);
            }
        }
    }
}

impl<'a> HashStable<StableHashingContext<'a>> for FieldPlacement {
    fn hash_stable<W: StableHasherResult>(&self,
                                          hcx: &mut StableHashingContext<'a>,
                                          hasher: &mut StableHasher<W>) {
        use ty::layout::FieldPlacement::*;
        mem::discriminant(self).hash_stable(hcx, hasher);

        match *self {
            Union(count) => {
                count.hash_stable(hcx, hasher);
            }
            Array { count, stride } => {
                count.hash_stable(hcx, hasher);
                stride.hash_stable(hcx, hasher);
            }
            Arbitrary { ref offsets, ref memory_index } => {
                offsets.hash_stable(hcx, hasher);
                memory_index.hash_stable(hcx, hasher);
            }
        }
    }
}

impl<'a> HashStable<StableHashingContext<'a>> for VariantIdx {
    fn hash_stable<W: StableHasherResult>(
        &self,
        hcx: &mut StableHashingContext<'a>,
        hasher: &mut StableHasher<W>,
    ) {
        self.as_u32().hash_stable(hcx, hasher)
    }
}

impl<'a> HashStable<StableHashingContext<'a>> for Abi {
    fn hash_stable<W: StableHasherResult>(&self,
                                          hcx: &mut StableHashingContext<'a>,
                                          hasher: &mut StableHasher<W>) {
        use ty::layout::Abi::*;
        mem::discriminant(self).hash_stable(hcx, hasher);

        match *self {
            Uninhabited => {}
            Scalar(ref value) => {
                value.hash_stable(hcx, hasher);
            }
            ScalarPair(ref a, ref b) => {
                a.hash_stable(hcx, hasher);
                b.hash_stable(hcx, hasher);
            }
            Vector { ref element, count } => {
                element.hash_stable(hcx, hasher);
                count.hash_stable(hcx, hasher);
            }
            Aggregate { sized } => {
                sized.hash_stable(hcx, hasher);
            }
        }
    }
}

impl<'a> HashStable<StableHashingContext<'a>> for Scalar {
    fn hash_stable<W: StableHasherResult>(&self,
                                          hcx: &mut StableHashingContext<'a>,
                                          hasher: &mut StableHasher<W>) {
        let Scalar { value, ref valid_range } = *self;
        value.hash_stable(hcx, hasher);
        valid_range.start().hash_stable(hcx, hasher);
        valid_range.end().hash_stable(hcx, hasher);
    }
}

impl_stable_hash_for!(struct ::ty::layout::LayoutDetails {
    variants,
    fields,
    abi,
    align,
    size
});

impl_stable_hash_for!(enum ::ty::layout::Integer {
    I8,
    I16,
    I32,
    I64,
    I128
});

impl_stable_hash_for!(enum ::ty::layout::Primitive {
    Int(integer, signed),
    Float(fty),
    Pointer
});

impl_stable_hash_for!(struct ::ty::layout::AbiAndPrefAlign {
    abi,
    pref
});

impl<'gcx> HashStable<StableHashingContext<'gcx>> for Align {
    fn hash_stable<W: StableHasherResult>(&self,
                                          hcx: &mut StableHashingContext<'gcx>,
                                          hasher: &mut StableHasher<W>) {
        self.bytes().hash_stable(hcx, hasher);
    }
}

impl<'gcx> HashStable<StableHashingContext<'gcx>> for Size {
    fn hash_stable<W: StableHasherResult>(&self,
                                          hcx: &mut StableHashingContext<'gcx>,
                                          hasher: &mut StableHasher<W>) {
        self.bytes().hash_stable(hcx, hasher);
    }
}

impl<'a, 'gcx> HashStable<StableHashingContext<'a>> for LayoutError<'gcx>
{
    fn hash_stable<W: StableHasherResult>(&self,
                                          hcx: &mut StableHashingContext<'a>,
                                          hasher: &mut StableHasher<W>) {
        use ty::layout::LayoutError::*;
        mem::discriminant(self).hash_stable(hcx, hasher);

        match *self {
            Unknown(t) |
            SizeOverflow(t) => t.hash_stable(hcx, hasher)
        }
    }
}