// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

use session::{self, DataTypeKind};
use ty::{self, Ty, TyCtxt, TypeFoldable, ReprOptions};

use syntax::ast::{self, IntTy, UintTy};
use syntax::attr;
use syntax_pos::DUMMY_SP;

use std::cmp;
use std::fmt;
use std::i128;
use std::iter;
use std::mem;
use std::ops::Bound;

use ich::StableHashingContext;
use rustc_data_structures::indexed_vec::{IndexVec, Idx};
use rustc_data_structures::stable_hasher::{HashStable, StableHasher,
                                           StableHasherResult};

pub use rustc_target::abi::*;
pub trait IntegerExt {
    fn to_ty<'a, 'tcx>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, signed: bool) -> Ty<'tcx>;
    fn from_attr<C: HasDataLayout>(cx: &C, ity: attr::IntType) -> Integer;
    fn repr_discr<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
                            ty: Ty<'tcx>,
                            repr: &ReprOptions,
                            min: i128,
                            max: i128)
                            -> (Integer, bool);
}
impl IntegerExt for Integer {
    fn to_ty<'a, 'tcx>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, signed: bool) -> Ty<'tcx> {
        match (*self, signed) {
            (I8, false) => tcx.types.u8,
            (I16, false) => tcx.types.u16,
            (I32, false) => tcx.types.u32,
            (I64, false) => tcx.types.u64,
            (I128, false) => tcx.types.u128,
            (I8, true) => tcx.types.i8,
            (I16, true) => tcx.types.i16,
            (I32, true) => tcx.types.i32,
            (I64, true) => tcx.types.i64,
            (I128, true) => tcx.types.i128,
        }
    }
    /// Get the Integer type from an attr::IntType.
    fn from_attr<C: HasDataLayout>(cx: &C, ity: attr::IntType) -> Integer {
        let dl = cx.data_layout();

        match ity {
            attr::SignedInt(IntTy::I8) | attr::UnsignedInt(UintTy::U8) => I8,
            attr::SignedInt(IntTy::I16) | attr::UnsignedInt(UintTy::U16) => I16,
            attr::SignedInt(IntTy::I32) | attr::UnsignedInt(UintTy::U32) => I32,
            attr::SignedInt(IntTy::I64) | attr::UnsignedInt(UintTy::U64) => I64,
            attr::SignedInt(IntTy::I128) | attr::UnsignedInt(UintTy::U128) => I128,
            attr::SignedInt(IntTy::Isize) | attr::UnsignedInt(UintTy::Usize) => {
                dl.ptr_sized_integer()
            }
        }
    }
    /// Find the appropriate Integer type and signedness for the given
    /// signed discriminant range and #[repr] attribute.
    /// N.B.: u128 values above i128::MAX will be treated as signed, but
    /// that shouldn't affect anything, other than maybe debuginfo.
    fn repr_discr<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
                            ty: Ty<'tcx>,
                            repr: &ReprOptions,
                            min: i128,
                            max: i128)
                            -> (Integer, bool) {
        // Theoretically, negative values could be larger in unsigned representation
        // than the unsigned representation of the signed minimum. However, if there
        // are any negative values, the only valid unsigned representation is u128
        // which can fit all i128 values, so the result remains unaffected.
        let unsigned_fit = Integer::fit_unsigned(cmp::max(min as u128, max as u128));
        let signed_fit = cmp::max(Integer::fit_signed(min), Integer::fit_signed(max));
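        // An illustrative example (not from the original source): for
        // min = -1, max = 100, `min as u128` wraps to 2^128 - 1, so
        // unsigned_fit is I128, while signed_fit is
        // max(fit_signed(-1), fit_signed(100)) = I8; since min is
        // negative, the signed fit is the one returned below.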
        let mut min_from_extern = None;
        let min_default = I8;

        if let Some(ity) = repr.int {
            let discr = Integer::from_attr(&tcx, ity);
            let fit = if ity.is_signed() { signed_fit } else { unsigned_fit };
            if discr < fit {
                bug!("Integer::repr_discr: `#[repr]` hint too small for \
                      discriminant range of enum `{}`", ty)
            }
            return (discr, ity.is_signed());
        }
        if repr.c() {
            match &tcx.sess.target.target.arch[..] {
                // WARNING: the ARM EABI has two variants; the one corresponding
                // to `at_least == I32` appears to be used on Linux and NetBSD,
                // but some systems may use the variant corresponding to no
                // lower bound. However, we don't run on those yet...?
                "arm" => min_from_extern = Some(I32),
                _ => min_from_extern = Some(I32),
            }
        }

        let at_least = min_from_extern.unwrap_or(min_default);

        // If there are no negative values, we can use the unsigned fit.
        if min >= 0 {
            (cmp::max(unsigned_fit, at_least), false)
        } else {
            (cmp::max(signed_fit, at_least), true)
        }
    }
}
pub trait PrimitiveExt {
    fn to_ty<'a, 'tcx>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>) -> Ty<'tcx>;
}

impl PrimitiveExt for Primitive {
    fn to_ty<'a, 'tcx>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>) -> Ty<'tcx> {
        match *self {
            Int(i, signed) => i.to_ty(tcx, signed),
            Float(FloatTy::F32) => tcx.types.f32,
            Float(FloatTy::F64) => tcx.types.f64,
            Pointer => tcx.mk_mut_ptr(tcx.mk_unit()),
        }
    }
}
/// The first half of a fat pointer.
///
/// - For a trait object, this is the address of the box.
/// - For a slice, this is the base address.
pub const FAT_PTR_ADDR: usize = 0;

/// The second half of a fat pointer.
///
/// - For a trait object, this is the address of the vtable.
/// - For a slice, this is the length.
pub const FAT_PTR_EXTRA: usize = 1;
#[derive(Copy, Clone, Debug, RustcEncodable, RustcDecodable)]
pub enum LayoutError<'tcx> {
    Unknown(Ty<'tcx>),
    SizeOverflow(Ty<'tcx>)
}

impl<'tcx> fmt::Display for LayoutError<'tcx> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match *self {
            LayoutError::Unknown(ty) => {
                write!(f, "the type `{:?}` has an unknown layout", ty)
            }
            LayoutError::SizeOverflow(ty) => {
                write!(f, "the type `{:?}` is too big for the current architecture", ty)
            }
        }
    }
}
fn layout_raw<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
                        query: ty::ParamEnvAnd<'tcx, Ty<'tcx>>)
                        -> Result<&'tcx LayoutDetails, LayoutError<'tcx>>
{
    ty::tls::with_related_context(tcx, move |icx| {
        let rec_limit = *tcx.sess.recursion_limit.get();
        let (param_env, ty) = query.into_parts();

        if icx.layout_depth > rec_limit {
            tcx.sess.fatal(
                &format!("overflow representing the type `{}`", ty));
        }

        // Update the ImplicitCtxt to increase the layout_depth
        let icx = ty::tls::ImplicitCtxt {
            layout_depth: icx.layout_depth + 1,
            ..icx.clone()
        };

        ty::tls::enter_context(&icx, |_| {
            let cx = LayoutCx { tcx, param_env };
            cx.layout_raw_uncached(ty)
        })
    })
}
pub fn provide(providers: &mut ty::query::Providers<'_>) {
    *providers = ty::query::Providers {
        layout_raw,
        ..*providers
    };
}

pub struct LayoutCx<'tcx, C> {
    pub tcx: C,
    pub param_env: ty::ParamEnv<'tcx>
}
impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> {
    fn layout_raw_uncached(&self, ty: Ty<'tcx>)
                           -> Result<&'tcx LayoutDetails, LayoutError<'tcx>> {
        let tcx = self.tcx;
        let param_env = self.param_env;
        let dl = self.data_layout();
        let scalar_unit = |value: Primitive| {
            let bits = value.size(dl).bits();
            assert!(bits <= 128);
            Scalar {
                value,
                valid_range: 0..=(!0 >> (128 - bits))
            }
        };
        let scalar = |value: Primitive| {
            tcx.intern_layout(LayoutDetails::scalar(self, scalar_unit(value)))
        };
        let scalar_pair = |a: Scalar, b: Scalar| {
            let b_align = b.value.align(dl);
            let align = a.value.align(dl).max(b_align).max(dl.aggregate_align);
            let b_offset = a.value.size(dl).align_to(b_align.abi);
            let size = (b_offset + b.value.size(dl)).align_to(align.abi);
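            // An illustrative example (not from the original source): on a
            // 64-bit target, pairing a Pointer with a pointer-sized Int (as
            // for a `&[T]` fat pointer) gives b_offset = 8 bytes and
            // size = 16 bytes, with 8-byte alignment.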
            LayoutDetails {
                variants: Variants::Single { index: VariantIdx::new(0) },
                fields: FieldPlacement::Arbitrary {
                    offsets: vec![Size::ZERO, b_offset],
                    memory_index: vec![0, 1]
                },
                abi: Abi::ScalarPair(a, b),
                align,
                size
            }
        };
        #[derive(Copy, Clone, Debug)]
        enum StructKind {
            /// A tuple, closure, or univariant which cannot be coerced to unsized.
            AlwaysSized,
            /// A univariant, the last field of which may be coerced to unsized.
            MaybeUnsized,
            /// A univariant, but with a prefix of an arbitrary size & alignment (e.g. enum tag).
            Prefixed(Size, Align),
        }
        let univariant_uninterned = |fields: &[TyLayout<'_>], repr: &ReprOptions, kind| {
            let packed = repr.packed();
            if packed && repr.align > 0 {
                bug!("struct cannot be packed and aligned");
            }

            let pack = Align::from_bytes(repr.pack as u64).unwrap();

            let mut align = if packed {
                dl.i8_align
            } else {
                dl.aggregate_align
            };

            let mut sized = true;
            let mut offsets = vec![Size::ZERO; fields.len()];
            let mut inverse_memory_index: Vec<u32> = (0..fields.len() as u32).collect();

            let mut optimize = !repr.inhibit_struct_field_reordering_opt();
            if let StructKind::Prefixed(_, align) = kind {
                optimize &= align.bytes() == 1;
            }
            if optimize {
                let end = if let StructKind::MaybeUnsized = kind {
                    fields.len() - 1
                } else {
                    fields.len()
                };
                let optimizing = &mut inverse_memory_index[..end];
                let field_align = |f: &TyLayout<'_>| {
                    if packed { f.align.abi.min(pack) } else { f.align.abi }
                };
                match kind {
                    StructKind::AlwaysSized |
                    StructKind::MaybeUnsized => {
                        optimizing.sort_by_key(|&x| {
                            // Place ZSTs first to avoid "interesting offsets",
                            // especially with only one or two non-ZST fields.
                            let f = &fields[x as usize];
                            (!f.is_zst(), cmp::Reverse(field_align(f)))
                        });
                    }
                    StructKind::Prefixed(..) => {
                        optimizing.sort_by_key(|&x| field_align(&fields[x as usize]));
                    }
                }
            }
            // inverse_memory_index holds field indices by increasing memory offset.
            // That is, if field 5 has offset 0, the first element of inverse_memory_index is 5.
            // We now write field offsets to the corresponding offset slot;
            // field 5 with offset 0 puts 0 in offsets[5].
            // At the bottom of this function, we use inverse_memory_index to produce memory_index.
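            // An illustrative example (not from the original source): for
            // source fields [a: u8, b: u32, c: u8] reordered in memory as
            // [b, a, c], inverse_memory_index is [1, 0, 2]: memory slot 0
            // holds field 1 (`b`), slot 1 holds field 0 (`a`), and so on.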
            let mut offset = Size::ZERO;

            if let StructKind::Prefixed(prefix_size, prefix_align) = kind {
                let prefix_align = if packed {
                    prefix_align.min(pack)
                } else {
                    prefix_align
                };
                align = align.max(AbiAndPrefAlign::new(prefix_align));
                offset = prefix_size.align_to(prefix_align);
            }
            for &i in &inverse_memory_index {
                let field = fields[i as usize];
                if !sized {
                    bug!("univariant: field #{} of `{}` comes after unsized field",
                         offsets.len(), ty);
                }

                if field.is_unsized() {
                    sized = false;
                }

                // Invariant: offset < dl.obj_size_bound() <= 1<<61
                let field_align = if packed {
                    field.align.min(AbiAndPrefAlign::new(pack))
                } else {
                    field.align
                };
                offset = offset.align_to(field_align.abi);
                align = align.max(field_align);

                debug!("univariant offset: {:?} field: {:#?}", offset, field);
                offsets[i as usize] = offset;

                offset = offset.checked_add(field.size, dl)
                    .ok_or(LayoutError::SizeOverflow(ty))?;
            }

            if repr.align > 0 {
                let repr_align = repr.align as u64;
                align = align.max(AbiAndPrefAlign::new(Align::from_bytes(repr_align).unwrap()));
                debug!("univariant repr_align: {:?}", repr_align);
            }

            debug!("univariant min_size: {:?}", offset);
            let min_size = offset;
            // As stated above, inverse_memory_index holds field indices by increasing offset.
            // This makes it an already-sorted view of the offsets vec.
            // To invert it, consider:
            // If field 5 has offset 0, inverse_memory_index[0] is 5, and memory_index[5] should be 0.
            // Field 5 would be the first element, so memory_index is i:
            // Note: if we didn't optimize, it's already right.
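            // An illustrative example (not from the original source):
            // inverting inverse_memory_index = [2, 0, 1] yields
            // memory_index = [1, 2, 0]; field k is stored at memory
            // position memory_index[k].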
            let mut memory_index;
            if optimize {
                memory_index = vec![0; inverse_memory_index.len()];

                for i in 0..inverse_memory_index.len() {
                    memory_index[inverse_memory_index[i] as usize] = i as u32;
                }
            } else {
                memory_index = inverse_memory_index;
            }

            let size = min_size.align_to(align.abi);
            let mut abi = Abi::Aggregate { sized };

            // Unpack newtype ABIs and find scalar pairs.
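            // An illustrative example (not from the original source): a
            // newtype such as `struct Wrapper(u64);` has one non-ZST field
            // that fills the struct, so it inherits Abi::Scalar from the
            // u64 instead of being treated as an opaque aggregate.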
            if sized && size.bytes() > 0 {
                // All other fields must be ZSTs, and we need them to all start at 0.
                let mut zst_offsets =
                    offsets.iter().enumerate().filter(|&(i, _)| fields[i].is_zst());
                if zst_offsets.all(|(_, o)| o.bytes() == 0) {
                    let mut non_zst_fields =
                        fields.iter().enumerate().filter(|&(_, f)| !f.is_zst());

                    match (non_zst_fields.next(), non_zst_fields.next(), non_zst_fields.next()) {
                        // We have exactly one non-ZST field.
                        (Some((i, field)), None, None) => {
                            // Field fills the struct and it has a scalar or scalar pair ABI.
                            if offsets[i].bytes() == 0 &&
                               align.abi == field.align.abi &&
                               size == field.size {
                                match field.abi {
                                    // For plain scalars, or vectors of them, we can't unpack
                                    // newtypes for `#[repr(C)]`, as that affects C ABIs.
                                    Abi::Scalar(_) | Abi::Vector { .. } if optimize => {
                                        abi = field.abi.clone();
                                    }
                                    // But scalar pairs are Rust-specific and get
                                    // treated as aggregates by C ABIs anyway.
                                    Abi::ScalarPair(..) => {
                                        abi = field.abi.clone();
                                    }
                                    _ => {}
                                }
                            }
                        }
                        // Two non-ZST fields, and they're both scalars.
                        (Some((i, &TyLayout {
                            details: &LayoutDetails { abi: Abi::Scalar(ref a), .. }, ..
                        })), Some((j, &TyLayout {
                            details: &LayoutDetails { abi: Abi::Scalar(ref b), .. }, ..
                        })), None) => {
                            // Order by the memory placement, not source order.
                            let ((i, a), (j, b)) = if offsets[i] < offsets[j] {
                                ((i, a), (j, b))
                            } else {
                                ((j, b), (i, a))
                            };
                            let pair = scalar_pair(a.clone(), b.clone());
                            let pair_offsets = match pair.fields {
                                FieldPlacement::Arbitrary {
                                    ref offsets,
                                    ref memory_index
                                } => {
                                    assert_eq!(memory_index, &[0, 1]);
                                    offsets
                                }
                                _ => bug!()
                            };
                            if offsets[i] == pair_offsets[0] &&
                               offsets[j] == pair_offsets[1] &&
                               align == pair.align &&
                               size == pair.size {
                                // We can use `ScalarPair` only when it matches our
                                // already computed layout (including `#[repr(C)]`).
                                abi = pair.abi.clone();
                            }
                        }
                        _ => {}
                    }
                }
            }
            if sized && fields.iter().any(|f| f.abi.is_uninhabited()) {
                abi = Abi::Uninhabited;
            }

            Ok(LayoutDetails {
                variants: Variants::Single { index: VariantIdx::new(0) },
                fields: FieldPlacement::Arbitrary {
                    offsets,
                    memory_index
                },
                abi,
                align,
                size
            })
        };
        let univariant = |fields: &[TyLayout<'_>], repr: &ReprOptions, kind| {
            Ok(tcx.intern_layout(univariant_uninterned(fields, repr, kind)?))
        };
        debug_assert!(!ty.has_infer_types());
        Ok(match ty.sty {
            // Basic scalars.
            ty::Bool => {
                tcx.intern_layout(LayoutDetails::scalar(self, Scalar {
                    value: Int(I8, false),
                    valid_range: 0..=1
                }))
            }
            ty::Char => {
                tcx.intern_layout(LayoutDetails::scalar(self, Scalar {
                    value: Int(I32, false),
                    valid_range: 0..=0x10FFFF
                }))
            }
            ty::Int(ity) => {
                scalar(Int(Integer::from_attr(dl, attr::SignedInt(ity)), true))
            }
            ty::Uint(ity) => {
                scalar(Int(Integer::from_attr(dl, attr::UnsignedInt(ity)), false))
            }
            ty::Float(fty) => scalar(Float(fty)),
            ty::FnPtr(_) => {
                let mut ptr = scalar_unit(Pointer);
                ptr.valid_range = 1..=*ptr.valid_range.end();
                tcx.intern_layout(LayoutDetails::scalar(self, ptr))
            }

            // The never type.
            ty::Never => {
                tcx.intern_layout(LayoutDetails {
                    variants: Variants::Single { index: VariantIdx::new(0) },
                    fields: FieldPlacement::Union(0),
                    abi: Abi::Uninhabited,
                    align: dl.i8_align,
                    size: Size::ZERO
                })
            }
            // Potentially-fat pointers.
            ty::Ref(_, pointee, _) |
            ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
                let mut data_ptr = scalar_unit(Pointer);
                if !ty.is_unsafe_ptr() {
                    data_ptr.valid_range = 1..=*data_ptr.valid_range.end();
                }

                let pointee = tcx.normalize_erasing_regions(param_env, pointee);
                if pointee.is_sized(tcx.at(DUMMY_SP), param_env) {
                    return Ok(tcx.intern_layout(LayoutDetails::scalar(self, data_ptr)));
                }

                let unsized_part = tcx.struct_tail(pointee);
                let metadata = match unsized_part.sty {
                    ty::Foreign(..) => {
                        return Ok(tcx.intern_layout(LayoutDetails::scalar(self, data_ptr)));
                    }
                    ty::Slice(_) | ty::Str => {
                        scalar_unit(Int(dl.ptr_sized_integer(), false))
                    }
                    ty::Dynamic(..) => {
                        let mut vtable = scalar_unit(Pointer);
                        vtable.valid_range = 1..=*vtable.valid_range.end();
                        vtable
                    }
                    _ => return Err(LayoutError::Unknown(unsized_part))
                };

                // Effectively a (ptr, meta) tuple.
                tcx.intern_layout(scalar_pair(data_ptr, metadata))
            }
            // Arrays and slices.
            ty::Array(element, mut count) => {
                if count.has_projections() {
                    count = tcx.normalize_erasing_regions(param_env, count);
                    if count.has_projections() {
                        return Err(LayoutError::Unknown(ty));
                    }
                }

                let element = self.layout_of(element)?;
                let count = count.unwrap_usize(tcx);
                let size = element.size.checked_mul(count, dl)
                    .ok_or(LayoutError::SizeOverflow(ty))?;

                tcx.intern_layout(LayoutDetails {
                    variants: Variants::Single { index: VariantIdx::new(0) },
                    fields: FieldPlacement::Array {
                        stride: element.size,
                        count
                    },
                    abi: Abi::Aggregate { sized: true },
                    align: element.align,
                    size
                })
            }
            ty::Slice(element) => {
                let element = self.layout_of(element)?;
                tcx.intern_layout(LayoutDetails {
                    variants: Variants::Single { index: VariantIdx::new(0) },
                    fields: FieldPlacement::Array {
                        stride: element.size,
                        count: 0
                    },
                    abi: Abi::Aggregate { sized: false },
                    align: element.align,
                    size: Size::ZERO
                })
            }
            ty::Str => {
                tcx.intern_layout(LayoutDetails {
                    variants: Variants::Single { index: VariantIdx::new(0) },
                    fields: FieldPlacement::Array {
                        stride: Size::from_bytes(1),
                        count: 0
                    },
                    abi: Abi::Aggregate { sized: false },
                    align: dl.i8_align,
                    size: Size::ZERO
                })
            }

            // Odd unit types.
            ty::FnDef(..) => {
                univariant(&[], &ReprOptions::default(), StructKind::AlwaysSized)?
            }
            ty::Dynamic(..) | ty::Foreign(..) => {
                let mut unit = univariant_uninterned(&[], &ReprOptions::default(),
                  StructKind::AlwaysSized)?;
                match unit.abi {
                    Abi::Aggregate { ref mut sized } => *sized = false,
                    _ => bug!()
                }
                tcx.intern_layout(unit)
            }
            // Tuples, generators and closures.
            ty::Generator(def_id, ref substs, _) => {
                let tys = substs.field_tys(def_id, tcx);
                univariant(&tys.map(|ty| self.layout_of(ty)).collect::<Result<Vec<_>, _>>()?,
                    &ReprOptions::default(),
                    StructKind::AlwaysSized)?
            }

            ty::Closure(def_id, ref substs) => {
                let tys = substs.upvar_tys(def_id, tcx);
                univariant(&tys.map(|ty| self.layout_of(ty)).collect::<Result<Vec<_>, _>>()?,
                    &ReprOptions::default(),
                    StructKind::AlwaysSized)?
            }

            ty::Tuple(tys) => {
                let kind = if tys.len() == 0 {
                    StructKind::AlwaysSized
                } else {
                    StructKind::MaybeUnsized
                };

                univariant(&tys.iter().map(|ty| self.layout_of(ty)).collect::<Result<Vec<_>, _>>()?,
                    &ReprOptions::default(), kind)?
            }
            // SIMD vector types.
            ty::Adt(def, ..) if def.repr.simd() => {
                let element = self.layout_of(ty.simd_type(tcx))?;
                let count = ty.simd_size(tcx) as u64;
                assert!(count > 0);
                let scalar = match element.abi {
                    Abi::Scalar(ref scalar) => scalar.clone(),
                    _ => {
                        tcx.sess.fatal(&format!("monomorphising SIMD type `{}` with \
                                                 a non-machine element type `{}`",
                                                ty, element.ty));
                    }
                };
                let size = element.size.checked_mul(count, dl)
                    .ok_or(LayoutError::SizeOverflow(ty))?;
                let align = dl.vector_align(size);
                let size = size.align_to(align.abi);

                tcx.intern_layout(LayoutDetails {
                    variants: Variants::Single { index: VariantIdx::new(0) },
                    fields: FieldPlacement::Array {
                        stride: element.size,
                        count
                    },
                    abi: Abi::Vector {
                        element: scalar,
                        count
                    },
                    size,
                    align,
                })
            }

            // ADTs.
            ty::Adt(def, substs) => {
                // Cache the field layouts.
                let variants = def.variants.iter().map(|v| {
                    v.fields.iter().map(|field| {
                        self.layout_of(field.ty(tcx, substs))
                    }).collect::<Result<Vec<_>, _>>()
                }).collect::<Result<IndexVec<VariantIdx, _>, _>>()?;

                if def.is_union() {
                    let packed = def.repr.packed();
                    if packed && def.repr.align > 0 {
                        bug!("Union cannot be packed and aligned");
                    }

                    let pack = Align::from_bytes(def.repr.pack as u64).unwrap();

                    let mut align = if packed {
                        dl.i8_align
                    } else {
                        dl.aggregate_align
                    };

                    if def.repr.align > 0 {
                        let repr_align = def.repr.align as u64;
                        align = align.max(
                            AbiAndPrefAlign::new(Align::from_bytes(repr_align).unwrap()));
                    }

                    let optimize = !def.repr.inhibit_union_abi_opt();
                    let mut size = Size::ZERO;
                    let mut abi = Abi::Aggregate { sized: true };
                    let index = VariantIdx::new(0);
                    for field in &variants[index] {
                        assert!(!field.is_unsized());

                        let field_align = if packed {
                            field.align.min(AbiAndPrefAlign::new(pack))
                        } else {
                            field.align
                        };
                        align = align.max(field_align);
                        // If all non-ZST fields have the same ABI, forward this ABI
                        if optimize && !field.is_zst() {
                            // Normalize scalar_unit to the maximal valid range
                            let field_abi = match &field.abi {
                                Abi::Scalar(x) => Abi::Scalar(scalar_unit(x.value)),
                                Abi::ScalarPair(x, y) => {
                                    Abi::ScalarPair(
                                        scalar_unit(x.value),
                                        scalar_unit(y.value),
                                    )
                                }
                                Abi::Vector { element: x, count } => {
                                    Abi::Vector {
                                        element: scalar_unit(x.value),
                                        count: *count,
                                    }
                                }
                                Abi::Uninhabited |
                                Abi::Aggregate { .. } => Abi::Aggregate { sized: true },
                            };

                            if size == Size::ZERO {
                                // first non-ZST field: initialize `abi`
                                abi = field_abi;
                            } else if abi != field_abi {
                                // different fields have different ABIs: reset to Aggregate
                                abi = Abi::Aggregate { sized: true };
                            }
                        }

                        size = cmp::max(size, field.size);
                    }

                    return Ok(tcx.intern_layout(LayoutDetails {
                        variants: Variants::Single { index },
                        fields: FieldPlacement::Union(variants[index].len()),
                        abi,
                        align,
                        size: size.align_to(align.abi)
                    }));
                }
                // A variant is absent if it's uninhabited and only has ZST fields.
                // Present uninhabited variants only require space for their fields,
                // but *not* an encoding of the discriminant (e.g. a tag value).
                // See issue #49298 for more details on the need to leave space
                // for non-ZST uninhabited data (mostly partial initialization).
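                // An illustrative example (not from the original source):
                // given `enum Void {}`, a variant `B(Void)` is absent (its
                // only field is an uninhabited ZST), while `C(u32, Void)` is
                // uninhabited but *present*, and still reserves space for
                // its u32 field.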
                let absent = |fields: &[TyLayout<'_>]| {
                    let uninhabited = fields.iter().any(|f| f.abi.is_uninhabited());
                    let is_zst = fields.iter().all(|f| f.is_zst());
                    uninhabited && is_zst
                };
                let (present_first, present_second) = {
                    let mut present_variants = variants.iter_enumerated().filter_map(|(i, v)| {
                        if absent(v) {
                            None
                        } else {
                            Some(i)
                        }
                    });
                    (present_variants.next(), present_variants.next())
                };
                if present_first.is_none() {
                    // Uninhabited because it has no variants, or only absent ones.
                    return tcx.layout_raw(param_env.and(tcx.types.never));
                }
                let is_struct = !def.is_enum() ||
                    // Only one variant is present.
                    (present_second.is_none() &&
                    // Representation optimizations are allowed.
                    !def.repr.inhibit_enum_layout_opt());
                if is_struct {
                    // Struct, or univariant enum equivalent to a struct.
                    // (Typechecking will reject discriminant-sizing attrs.)

                    let v = present_first.unwrap();
                    let kind = if def.is_enum() || variants[v].len() == 0 {
                        StructKind::AlwaysSized
                    } else {
                        let param_env = tcx.param_env(def.did);
                        let last_field = def.variants[v].fields.last().unwrap();
                        let always_sized = tcx.type_of(last_field.did)
                            .is_sized(tcx.at(DUMMY_SP), param_env);
                        if !always_sized { StructKind::MaybeUnsized }
                        else { StructKind::AlwaysSized }
                    };

                    let mut st = univariant_uninterned(&variants[v], &def.repr, kind)?;
                    st.variants = Variants::Single { index: v };
                    let (start, end) = self.tcx.layout_scalar_valid_range(def.did);
                    match st.abi {
                        Abi::Scalar(ref mut scalar) |
                        Abi::ScalarPair(ref mut scalar, _) => {
                            // the asserts ensure that we are not using the
                            // `#[rustc_layout_scalar_valid_range(n)]`
                            // attribute to widen the range of anything as that would probably
                            // result in UB somewhere
                            if let Bound::Included(start) = start {
                                assert!(*scalar.valid_range.start() <= start);
                                scalar.valid_range = start..=*scalar.valid_range.end();
                            }
                            if let Bound::Included(end) = end {
                                assert!(*scalar.valid_range.end() >= end);
                                scalar.valid_range = *scalar.valid_range.start()..=end;
                            }
                        }
                        _ => assert!(
                            start == Bound::Unbounded && end == Bound::Unbounded,
                            "nonscalar layout for layout_scalar_valid_range type {:?}: {:#?}",
                            def,
                            st,
                        ),
                    }
                    return Ok(tcx.intern_layout(st));
                }
                // The current code for niche-filling relies on variant indices
                // instead of actual discriminants, so dataful enums with
                // explicit discriminants (RFC #2363) would misbehave.
                let no_explicit_discriminants = def.variants.iter_enumerated()
                    .all(|(i, v)| v.discr == ty::VariantDiscr::Relative(i.as_u32()));

                // Niche-filling enum optimization.
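                // An illustrative example (not from the original source):
                // `Option<&T>` is the classic case: the all-zero bit pattern
                // is invalid for `&T`, so `None` is encoded as 0 and the
                // whole enum stays pointer-sized, with no separate tag.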
                if !def.repr.inhibit_enum_layout_opt() && no_explicit_discriminants {
                    let mut dataful_variant = None;
                    let mut niche_variants = VariantIdx::MAX..=VariantIdx::new(0);

                    // Find one non-ZST variant.
                    'variants: for (v, fields) in variants.iter_enumerated() {
                        if absent(fields) {
                            continue 'variants;
                        }
                        for f in fields {
                            if !f.is_zst() {
                                if dataful_variant.is_none() {
                                    dataful_variant = Some(v);
                                    continue 'variants;
                                } else {
                                    dataful_variant = None;
                                    break 'variants;
                                }
                            }
                        }
                        niche_variants = *niche_variants.start().min(&v)..=v;
                    }

                    if niche_variants.start() > niche_variants.end() {
                        dataful_variant = None;
                    }
                    if let Some(i) = dataful_variant {
                        let count = (
                            niche_variants.end().as_u32() - niche_variants.start().as_u32() + 1
                        ) as u128;
                        for (field_index, &field) in variants[i].iter().enumerate() {
                            let niche = match self.find_niche(field)? {
                                Some(niche) => niche,
                                None => continue,
                            };
                            let (niche_start, niche_scalar) = match niche.reserve(self, count) {
                                Some(pair) => pair,
                                None => continue,
                            };

                            let mut align = dl.aggregate_align;
                            let st = variants.iter_enumerated().map(|(j, v)| {
                                let mut st = univariant_uninterned(v,
                                    &def.repr, StructKind::AlwaysSized)?;
                                st.variants = Variants::Single { index: j };

                                align = align.max(st.align);

                                Ok(st)
                            }).collect::<Result<IndexVec<VariantIdx, _>, _>>()?;

                            let offset = st[i].fields.offset(field_index) + niche.offset;
                            let size = st[i].size;
                            let mut abi = match st[i].abi {
                                Abi::Scalar(_) => Abi::Scalar(niche_scalar.clone()),
                                Abi::ScalarPair(ref first, ref second) => {
                                    // We need to use scalar_unit to reset the
                                    // valid range to the maximal one for that
                                    // primitive, because only the niche is
                                    // guaranteed to be initialised, not the
                                    // other primitive.
                                    if offset.bytes() == 0 {
                                        Abi::ScalarPair(
                                            niche_scalar.clone(),
                                            scalar_unit(second.value),
                                        )
                                    } else {
                                        Abi::ScalarPair(
                                            scalar_unit(first.value),
                                            niche_scalar.clone(),
                                        )
                                    }
                                }
                                _ => Abi::Aggregate { sized: true },
                            };

                            if st.iter().all(|v| v.abi.is_uninhabited()) {
                                abi = Abi::Uninhabited;
                            }

                            return Ok(tcx.intern_layout(LayoutDetails {
                                variants: Variants::NicheFilling {
                                    dataful_variant: i,
                                    niche_variants,
                                    niche: niche_scalar,
                                    niche_start,
                                    variants: st,
                                },
                                fields: FieldPlacement::Arbitrary {
                                    offsets: vec![offset],
                                    memory_index: vec![0]
                                },
                                abi,
                                size,
                                align,
                            }));
                        }
                    }
                }
                let (mut min, mut max) = (i128::max_value(), i128::min_value());
                let discr_type = def.repr.discr_type();
                let bits = Integer::from_attr(self, discr_type).size().bits();
                for (i, discr) in def.discriminants(tcx) {
                    if variants[i].iter().any(|f| f.abi.is_uninhabited()) {
                        continue;
                    }
                    let mut x = discr.val as i128;
                    if discr_type.is_signed() {
                        // sign extend the raw representation to be an i128
                        x = (x << (128 - bits)) >> (128 - bits);
                    }
                    if x < min { min = x; }
                    if x > max { max = x; }
                }
                // We might have no inhabited variants, so pretend there's at least one.
                if (min, max) == (i128::max_value(), i128::min_value()) {
                    min = 0;
                    max = 0;
                }
                assert!(min <= max, "discriminant range is {}...{}", min, max);
                let (min_ity, signed) = Integer::repr_discr(tcx, ty, &def.repr, min, max);
                let mut align = dl.aggregate_align;
                let mut size = Size::ZERO;

                // We're interested in the smallest alignment, so start large.
                let mut start_align = Align::from_bytes(256).unwrap();
                assert_eq!(Integer::for_align(dl, start_align), None);

                // repr(C) on an enum tells us to make a (tag, union) layout,
                // so we need to grow the prefix alignment to be at least
                // the alignment of the union. (This value is used both for
                // determining the alignment of the overall enum, and for
                // determining the alignment of the payload after the tag.)
                let mut prefix_align = min_ity.align(dl).abi;
                if def.repr.c() {
                    for fields in &variants {
                        for field in fields {
                            prefix_align = prefix_align.max(field.align.abi);
                        }
                    }
                }
                // Create the set of structs that represent each variant.
                let mut layout_variants = variants.iter_enumerated().map(|(i, field_layouts)| {
                    let mut st = univariant_uninterned(&field_layouts,
                        &def.repr, StructKind::Prefixed(min_ity.size(), prefix_align))?;
                    st.variants = Variants::Single { index: i };
                    // Find the first field we can't move later
                    // to make room for a larger discriminant.
                    for field in st.fields.index_by_increasing_offset().map(|j| field_layouts[j]) {
                        if !field.is_zst() || field.align.abi.bytes() != 1 {
                            start_align = start_align.min(field.align.abi);
                            break;
                        }
                    }
                    size = cmp::max(size, st.size);
                    align = align.max(st.align);
                    Ok(st)
                }).collect::<Result<IndexVec<VariantIdx, _>, _>>()?;

                // Align the maximum variant size to the largest alignment.
                size = size.align_to(align.abi);

                if size.bytes() >= dl.obj_size_bound() {
                    return Err(LayoutError::SizeOverflow(ty));
                }
                let typeck_ity = Integer::from_attr(dl, def.repr.discr_type());
                if typeck_ity < min_ity {
                    // It is a bug if layout decided on a greater discriminant size than
                    // typeck did at this point (based on the values the discriminant can
                    // take on), because this discriminant will be loaded, and then stored
                    // into a variable of the type computed by typeck. Consider an example
                    // of such a bug: typeck decided on a byte-sized discriminant, but
                    // layout thinks we need 16 bits to store all discriminant values.
                    // Then, in codegen, storing that 16-bit discriminant into an 8-bit
                    // sized temporary would discard some of the space necessary to
                    // represent it (or layout is wrong in thinking it needs 16 bits).
                    bug!("layout decided on a larger discriminant type ({:?}) than typeck ({:?})",
                         min_ity, typeck_ity);
                    // However, it is fine to make the discriminant type larger (as an
                    // optimisation) after this point; we'll just truncate the value we
                    // load in codegen.
                }
                // Check to see if we should use a different type for the
                // discriminant. We can safely use a type with the same size
                // as the alignment of the first field of each variant.
                // We increase the size of the discriminant to avoid LLVM copying
                // padding when it doesn't need to. This normally causes unaligned
                // load/stores and excessive memcpy/memset operations. By using a
                // bigger integer size, LLVM can be sure about its contents and
                // won't be so conservative.
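                // An illustrative example (not from the original source): for
                // `enum E { A(u64), B }`, the tag only needs one byte, but the
                // payload is 8-byte aligned, so the tag can be widened to
                // 8 bytes without growing the enum's overall size.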
                // Use the initial field alignment
                let mut ity = if def.repr.c() || def.repr.int.is_some() {
                    min_ity
                } else {
                    Integer::for_align(dl, start_align).unwrap_or(min_ity)
                };

                // If the alignment is not larger than the chosen discriminant size,
                // don't use the alignment as the final size.
                if ity <= min_ity {
                    ity = min_ity;
                } else {
                    // Patch up the variants' first few fields.
                    let old_ity_size = min_ity.size();
                    let new_ity_size = ity.size();
                    for variant in &mut layout_variants {
                        match variant.fields {
                            FieldPlacement::Arbitrary { ref mut offsets, .. } => {
                                for i in offsets {
                                    if *i <= old_ity_size {
                                        assert_eq!(*i, old_ity_size);
                                        *i = new_ity_size;
                                    }
                                }
                                // We might be making the struct larger.
                                if variant.size <= old_ity_size {
                                    variant.size = new_ity_size;
                                }
                            }
                            _ => bug!()
                        }
                    }
                }
                let tag_mask = !0u128 >> (128 - ity.size().bits());
                let tag = Scalar {
                    value: Int(ity, signed),
                    valid_range: (min as u128 & tag_mask)..=(max as u128 & tag_mask),
                };
                let mut abi = Abi::Aggregate { sized: true };
                if tag.value.size(dl) == size {
                    abi = Abi::Scalar(tag.clone());
                } else {
                    // Try to use a ScalarPair for all tagged enums.
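                    // An illustrative example (not from the original source):
                    // for `enum E { A(u32), B(u32) }`, every variant is a
                    // (tag, u32) pair at the same offsets, so the whole enum
                    // can be given Abi::ScalarPair(tag, u32).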
                    let mut common_prim = None;
                    for (field_layouts, layout_variant) in variants.iter().zip(&layout_variants) {
                        let offsets = match layout_variant.fields {
                            FieldPlacement::Arbitrary { ref offsets, .. } => offsets,
                            _ => bug!(),
                        };
                        let mut fields = field_layouts
                            .iter()
                            .zip(offsets)
                            .filter(|p| !p.0.is_zst());
                        let (field, offset) = match (fields.next(), fields.next()) {
                            (None, None) => continue,
                            (Some(pair), None) => pair,
                            _ => {
                                common_prim = None;
                                break;
                            }
                        };
                        let prim = match field.details.abi {
                            Abi::Scalar(ref scalar) => scalar.value,
                            _ => {
                                common_prim = None;
                                break;
                            }
                        };
                        if let Some(pair) = common_prim {
                            // This is pretty conservative. We could go fancier
                            // by conflating things like i32 and u32, or even
                            // realising that (u8, u8) could just cohabit with
                            // (u16) or even (u32).
                            if pair != (prim, offset) {
                                common_prim = None;
                                break;
                            }
                        } else {
                            common_prim = Some((prim, offset));
                        }
                    }
                    if let Some((prim, offset)) = common_prim {
                        let pair = scalar_pair(tag.clone(), scalar_unit(prim));
                        let pair_offsets = match pair.fields {
                            FieldPlacement::Arbitrary {
                                ref offsets,
                                ref memory_index
                            } => {
                                assert_eq!(memory_index, &[0, 1]);
                                offsets
                            }
                            _ => bug!()
                        };
                        if pair_offsets[0] == Size::ZERO &&
                           pair_offsets[1] == *offset &&
                           align == pair.align &&
                           size == pair.size {
                            // We can use `ScalarPair` only when it matches our
                            // already computed layout (including `#[repr(C)]`).
                            abi = pair.abi.clone();
                        }
                    }
                }

                if layout_variants.iter().all(|v| v.abi.is_uninhabited()) {
                    abi = Abi::Uninhabited;
                }
                tcx.intern_layout(LayoutDetails {
                    variants: Variants::Tagged {
                        tag,
                        variants: layout_variants,
                    },
                    fields: FieldPlacement::Arbitrary {
                        offsets: vec![Size::ZERO],
                        memory_index: vec![0]
                    },
                    abi,
                    align,
                    size
                })
            }
            // Types with no meaningful known layout.
            ty::Projection(_) | ty::Opaque(..) => {
                let normalized = tcx.normalize_erasing_regions(param_env, ty);
                if ty == normalized {
                    return Err(LayoutError::Unknown(ty));
                }
                tcx.layout_raw(param_env.and(normalized))?
            }

            ty::Bound(..) |
            ty::UnnormalizedProjection(..) |
            ty::GeneratorWitness(..) |
            ty::Infer(_) => {
                bug!("LayoutDetails::compute: unexpected type `{}`", ty)
            }

            ty::Param(_) | ty::Error => {
                return Err(LayoutError::Unknown(ty));
            }
        })
    }
    /// This is invoked by the `layout_raw` query to record the final
    /// layout of each type.
    #[inline]
    fn record_layout_for_printing(&self, layout: TyLayout<'tcx>) {
        // If we are running with `-Zprint-type-sizes`, record layouts for
        // dumping later. Ignore layouts that are done with non-empty
        // environments or non-monomorphic layouts, as the user only wants
        // to see the stuff resulting from the final codegen session.
        if
            !self.tcx.sess.opts.debugging_opts.print_type_sizes ||
            layout.ty.has_param_types() ||
            layout.ty.has_self_ty() ||
            !self.param_env.caller_bounds.is_empty()
        {
            return;
        }

        self.record_layout_for_printing_outlined(layout)
    }
    fn record_layout_for_printing_outlined(&self, layout: TyLayout<'tcx>) {
        // (delay format until we actually need it)
        let record = |kind, packed, opt_discr_size, variants| {
            let type_desc = format!("{:?}", layout.ty);
            self.tcx.sess.code_stats.borrow_mut().record_type_size(kind,
                                                                   type_desc,
                                                                   layout.align.abi,
                                                                   layout.size,
                                                                   packed,
                                                                   opt_discr_size,
                                                                   variants);
        };

        let adt_def = match layout.ty.sty {
            ty::Adt(ref adt_def, _) => {
                debug!("print-type-size t: `{:?}` process adt", layout.ty);
                adt_def
            }

            ty::Closure(..) => {
                debug!("print-type-size t: `{:?}` record closure", layout.ty);
                record(DataTypeKind::Closure, false, None, vec![]);
                return;
            }

            _ => {
                debug!("print-type-size t: `{:?}` skip non-nominal", layout.ty);
                return;
            }
        };
        let adt_kind = adt_def.adt_kind();
        let adt_packed = adt_def.repr.packed();

        let build_variant_info = |n: Option<ast::Name>,
                                  flds: &[ast::Name],
                                  layout: TyLayout<'tcx>| {
            let mut min_size = Size::ZERO;
            let field_info: Vec<_> = flds.iter().enumerate().map(|(i, &name)| {
                match layout.field(self, i) {
                    Err(err) => {
                        bug!("no layout found for field {}: `{:?}`", name, err);
                    }
                    Ok(field_layout) => {
                        let offset = layout.fields.offset(i);
                        let field_end = offset + field_layout.size;
                        if min_size < field_end {
                            min_size = field_end;
                        }
                        session::FieldInfo {
                            name: name.to_string(),
                            offset: offset.bytes(),
                            size: field_layout.size.bytes(),
                            align: field_layout.align.abi.bytes(),
                        }
                    }
                }
            }).collect();

            session::VariantInfo {
                name: n.map(|n| n.to_string()),
                kind: if layout.is_unsized() {
                    session::SizeKind::Min
                } else {
                    session::SizeKind::Exact
                },
                align: layout.align.abi.bytes(),
                size: if min_size.bytes() == 0 {
                    layout.size.bytes()
                } else {
                    min_size.bytes()
                },
                fields: field_info,
            }
        };
        match layout.variants {
            Variants::Single { index } => {
                debug!("print-type-size `{:#?}` variant {}",
                       layout, adt_def.variants[index].name);
                if !adt_def.variants.is_empty() {
                    let variant_def = &adt_def.variants[index];
                    let fields: Vec<_> =
                        variant_def.fields.iter().map(|f| f.ident.name).collect();
                    record(adt_kind.into(),
                           adt_packed,
                           None,
                           vec![build_variant_info(Some(variant_def.name),
                                                   &fields,
                                                   layout)]);
                } else {
                    // (This case arises for *empty* enums; so give it
                    // zero variants.)
                    record(adt_kind.into(), adt_packed, None, vec![]);
                }
            }

            Variants::NicheFilling { .. } |
            Variants::Tagged { .. } => {
                debug!("print-type-size `{:#?}` adt general variants def {}",
                       layout.ty, adt_def.variants.len());
                let variant_infos: Vec<_> =
                    adt_def.variants.iter_enumerated().map(|(i, variant_def)| {
                        let fields: Vec<_> =
                            variant_def.fields.iter().map(|f| f.ident.name).collect();
                        build_variant_info(Some(variant_def.name),
                                           &fields,
                                           layout.for_variant(self, i))
                    })
                    .collect();
                record(adt_kind.into(), adt_packed, match layout.variants {
                    Variants::Tagged { ref tag, .. } => Some(tag.value.size(self)),
                    _ => None
                }, variant_infos);
            }
        }
    }
}
/// Type size "skeleton", i.e. the only information determining a type's size.
/// While this is conservative (aside from constant sizes, only pointers,
/// newtypes thereof, and null pointer optimized enums are allowed), it is
/// enough to statically check common use cases of transmute.
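///
/// An illustrative example (not from the original source): transmuting
/// between `&T` and `*mut T` for a type parameter `T: ?Sized` can be
/// accepted because both sides are `SizeSkeleton::Pointer` with the same
/// `tail`, even though neither side has a statically known layout.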
#[derive(Copy, Clone, Debug)]
pub enum SizeSkeleton<'tcx> {
    /// Any statically computable Layout.
    Known(Size),

    /// A potentially-fat pointer.
    Pointer {
        /// If true, this pointer is never null.
        non_zero: bool,
        /// The type which determines the unsized metadata, if any,
        /// of this pointer. Either a type parameter or a projection
        /// depending on one, with regions erased.
        tail: Ty<'tcx>
    }
}
impl<'a, 'tcx> SizeSkeleton<'tcx> {
    pub fn compute(ty: Ty<'tcx>,
                   tcx: TyCtxt<'a, 'tcx, 'tcx>,
                   param_env: ty::ParamEnv<'tcx>)
                   -> Result<SizeSkeleton<'tcx>, LayoutError<'tcx>> {
        debug_assert!(!ty.has_infer_types());

        // First try computing a static layout.
        let err = match tcx.layout_of(param_env.and(ty)) {
            Ok(layout) => {
                return Ok(SizeSkeleton::Known(layout.size));
            }
            Err(err) => err
        };
        match ty.sty {
            ty::Ref(_, pointee, _) |
            ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
                let non_zero = !ty.is_unsafe_ptr();
                let tail = tcx.struct_tail(pointee);
                match tail.sty {
                    ty::Param(_) | ty::Projection(_) => {
                        debug_assert!(tail.has_param_types() || tail.has_self_ty());
                        Ok(SizeSkeleton::Pointer {
                            non_zero,
                            tail: tcx.erase_regions(&tail)
                        })
                    }
                    _ => {
                        bug!("SizeSkeleton::compute({}): layout errored ({}), yet \
                              tail `{}` is not a type parameter or a projection",
                             ty, err, tail)
                    }
                }
            }
            ty::Adt(def, substs) => {
                // Only newtypes and enums w/ nullable pointer optimization.
                if def.is_union() || def.variants.is_empty() || def.variants.len() > 2 {
                    return Err(err);
                }

                // Get a zero-sized variant or a pointer newtype.
                let zero_or_ptr_variant = |i| {
                    let i = VariantIdx::new(i);
                    let fields = def.variants[i].fields.iter().map(|field| {
                        SizeSkeleton::compute(field.ty(tcx, substs), tcx, param_env)
                    });
                    let mut ptr = None;
                    for field in fields {
                        let field = field?;
                        match field {
                            SizeSkeleton::Known(size) => {
                                if size.bytes() > 0 {
                                    return Err(err);
                                }
                            }
                            SizeSkeleton::Pointer {..} => {
                                if ptr.is_some() {
                                    return Err(err);
                                }
                                ptr = Some(field);
                            }
                        }
                    }
                    Ok(ptr)
                };

                let v0 = zero_or_ptr_variant(0)?;
                // Newtype.
                if def.variants.len() == 1 {
                    if let Some(SizeSkeleton::Pointer { non_zero, tail }) = v0 {
                        return Ok(SizeSkeleton::Pointer {
                            non_zero: non_zero || match tcx.layout_scalar_valid_range(def.did) {
                                (Bound::Included(start), Bound::Unbounded) => start > 0,
                                (Bound::Included(start), Bound::Included(end)) =>
                                    0 < start && start < end,
                                _ => false,
                            },
                            tail,
                        });
                    } else {
                        return Err(err);
                    }
                }
                let v1 = zero_or_ptr_variant(1)?;
                // Nullable pointer enum optimization.
                match (v0, v1) {
                    (Some(SizeSkeleton::Pointer { non_zero: true, tail }), None) |
                    (None, Some(SizeSkeleton::Pointer { non_zero: true, tail })) => {
                        Ok(SizeSkeleton::Pointer {
                            non_zero: false,
                            tail,
                        })
                    }
                    _ => Err(err)
                }
            }

            ty::Projection(_) | ty::Opaque(..) => {
                let normalized = tcx.normalize_erasing_regions(param_env, ty);
                if ty == normalized {
                    Err(err)
                } else {
                    SizeSkeleton::compute(normalized, tcx, param_env)
                }
            }

            _ => Err(err)
        }
    }
    pub fn same_size(self, other: SizeSkeleton<'_>) -> bool {
        match (self, other) {
            (SizeSkeleton::Known(a), SizeSkeleton::Known(b)) => a == b,
            (SizeSkeleton::Pointer { tail: a, .. },
             SizeSkeleton::Pointer { tail: b, .. }) => a == b,
            _ => false
        }
    }
}
pub trait HasTyCtxt<'tcx>: HasDataLayout {
    fn tcx<'a>(&'a self) -> TyCtxt<'a, 'tcx, 'tcx>;
}

impl<'a, 'gcx, 'tcx> HasDataLayout for TyCtxt<'a, 'gcx, 'tcx> {
    fn data_layout(&self) -> &TargetDataLayout {
        &self.data_layout
    }
}

impl<'a, 'gcx, 'tcx> HasTyCtxt<'gcx> for TyCtxt<'a, 'gcx, 'tcx> {
    fn tcx<'b>(&'b self) -> TyCtxt<'b, 'gcx, 'gcx> {
        self.global_tcx()
    }
}

impl<'tcx, T: HasDataLayout> HasDataLayout for LayoutCx<'tcx, T> {
    fn data_layout(&self) -> &TargetDataLayout {
        self.tcx.data_layout()
    }
}

impl<'gcx, 'tcx, T: HasTyCtxt<'gcx>> HasTyCtxt<'gcx> for LayoutCx<'tcx, T> {
    fn tcx<'b>(&'b self) -> TyCtxt<'b, 'gcx, 'gcx> {
        self.tcx.tcx()
    }
}
pub trait MaybeResult<T> {
    fn from_ok(x: T) -> Self;
    fn map_same<F: FnOnce(T) -> T>(self, f: F) -> Self;
}

impl<T> MaybeResult<T> for T {
    fn from_ok(x: T) -> Self {
        x
    }
    fn map_same<F: FnOnce(T) -> T>(self, f: F) -> Self {
        f(self)
    }
}

impl<T, E> MaybeResult<T> for Result<T, E> {
    fn from_ok(x: T) -> Self {
        Ok(x)
    }
    fn map_same<F: FnOnce(T) -> T>(self, f: F) -> Self {
        self.map(f)
    }
}

pub type TyLayout<'tcx> = ::rustc_target::abi::TyLayout<'tcx, Ty<'tcx>>;
impl<'a, 'tcx> LayoutOf for LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> {
    type Ty = Ty<'tcx>;
    type TyLayout = Result<TyLayout<'tcx>, LayoutError<'tcx>>;

    /// Computes the layout of a type. Note that this implicitly
    /// executes in "reveal all" mode.
    fn layout_of(&self, ty: Ty<'tcx>) -> Self::TyLayout {
        let param_env = self.param_env.with_reveal_all();
        let ty = self.tcx.normalize_erasing_regions(param_env, ty);
        let details = self.tcx.layout_raw(param_env.and(ty))?;
        let layout = TyLayout {
            ty,
            details
        };

        // NB: This recording is normally disabled; when enabled, it
        // can however trigger recursive invocations of `layout_of`.
        // Therefore, we execute it *after* the main query has
        // completed, to avoid problems around recursive structures
        // and the like. (Admittedly, I wasn't able to reproduce a problem
        // here, but it seems like the right thing to do. -nmatsakis)
        self.record_layout_for_printing(layout);

        Ok(layout)
    }
}
impl<'a, 'tcx> LayoutOf for LayoutCx<'tcx, ty::query::TyCtxtAt<'a, 'tcx, 'tcx>> {
    type Ty = Ty<'tcx>;
    type TyLayout = Result<TyLayout<'tcx>, LayoutError<'tcx>>;

    /// Computes the layout of a type. Note that this implicitly
    /// executes in "reveal all" mode.
    fn layout_of(&self, ty: Ty<'tcx>) -> Self::TyLayout {
        let param_env = self.param_env.with_reveal_all();
        let ty = self.tcx.normalize_erasing_regions(param_env, ty);
        let details = self.tcx.layout_raw(param_env.and(ty))?;
        let layout = TyLayout {
            ty,
            details
        };

        // NB: This recording is normally disabled; when enabled, it
        // can however trigger recursive invocations of `layout_of`.
        // Therefore, we execute it *after* the main query has
        // completed, to avoid problems around recursive structures
        // and the like. (Admittedly, I wasn't able to reproduce a problem
        // here, but it seems like the right thing to do. -nmatsakis)
        let cx = LayoutCx {
            tcx: *self.tcx,
            param_env: self.param_env
        };
        cx.record_layout_for_printing(layout);

        Ok(layout)
    }
}
// Helper (inherent) `layout_of` methods to avoid pushing `LayoutCx` to users.
impl TyCtxt<'a, 'tcx, '_> {
    /// Computes the layout of a type. Note that this implicitly
    /// executes in "reveal all" mode.
    #[inline]
    pub fn layout_of(self, param_env_and_ty: ty::ParamEnvAnd<'tcx, Ty<'tcx>>)
                     -> Result<TyLayout<'tcx>, LayoutError<'tcx>> {
        let cx = LayoutCx {
            tcx: self.global_tcx(),
            param_env: param_env_and_ty.param_env
        };
        cx.layout_of(param_env_and_ty.value)
    }
}

impl ty::query::TyCtxtAt<'a, 'tcx, '_> {
    /// Computes the layout of a type. Note that this implicitly
    /// executes in "reveal all" mode.
    #[inline]
    pub fn layout_of(self, param_env_and_ty: ty::ParamEnvAnd<'tcx, Ty<'tcx>>)
                     -> Result<TyLayout<'tcx>, LayoutError<'tcx>> {
        let cx = LayoutCx {
            tcx: self.global_tcx().at(self.span),
            param_env: param_env_and_ty.param_env
        };
        cx.layout_of(param_env_and_ty.value)
    }
}
impl<'a, 'tcx, C> TyLayoutMethods<'tcx, C> for Ty<'tcx>
    where C: LayoutOf<Ty = Ty<'tcx>> + HasTyCtxt<'tcx>,
          C::TyLayout: MaybeResult<TyLayout<'tcx>>
{
    fn for_variant(this: TyLayout<'tcx>, cx: &C, variant_index: VariantIdx) -> TyLayout<'tcx> {
        let details = match this.variants {
            Variants::Single { index } if index == variant_index => this.details,

            Variants::Single { index } => {
                // Deny calling for_variant more than once for non-Single enums.
                cx.layout_of(this.ty).map_same(|layout| {
                    assert_eq!(layout.variants, Variants::Single { index });
                    layout
                });

                let fields = match this.ty.sty {
                    ty::Adt(def, _) => def.variants[variant_index].fields.len(),
                    _ => bug!()
                };
                let tcx = cx.tcx();
                tcx.intern_layout(LayoutDetails {
                    variants: Variants::Single { index: variant_index },
                    fields: FieldPlacement::Union(fields),
                    abi: Abi::Uninhabited,
                    align: tcx.data_layout.i8_align,
                    size: Size::ZERO
                })
            }

            Variants::NicheFilling { ref variants, .. } |
            Variants::Tagged { ref variants, .. } => {
                &variants[variant_index]
            }
        };

        assert_eq!(details.variants, Variants::Single { index: variant_index });

        TyLayout {
            ty: this.ty,
            details
        }
    }
    fn field(this: TyLayout<'tcx>, cx: &C, i: usize) -> C::TyLayout {
        let tcx = cx.tcx();
        cx.layout_of(match this.ty.sty {
            ty::Bool |
            ty::Char |
            ty::Int(_) |
            ty::Uint(_) |
            ty::Float(_) |
            ty::FnPtr(_) |
            ty::Never |
            ty::FnDef(..) |
            ty::GeneratorWitness(..) |
            ty::Foreign(..) |
            ty::Dynamic(..) => {
                bug!("TyLayout::field_type({:?}): not applicable", this)
            }

            // Potentially-fat pointers.
            ty::Ref(_, pointee, _) |
            ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
                assert!(i < this.fields.count());

                // Reuse the fat *T type as its own thin pointer data field.
                // This provides information about e.g. DST struct pointees
                // (which may have no non-DST form), and will work as long
                // as the `Abi` or `FieldPlacement` is checked by users.
                if i == 0 {
                    let nil = tcx.mk_unit();
                    let ptr_ty = if this.ty.is_unsafe_ptr() {
                        tcx.mk_mut_ptr(nil)
                    } else {
                        tcx.mk_mut_ref(tcx.types.re_static, nil)
                    };
                    return cx.layout_of(ptr_ty).map_same(|mut ptr_layout| {
                        ptr_layout.ty = this.ty;
                        ptr_layout
                    });
                }

                match tcx.struct_tail(pointee).sty {
                    ty::Slice(_) |
                    ty::Str => tcx.types.usize,
                    ty::Dynamic(_, _) => {
                        tcx.mk_imm_ref(
                            tcx.types.re_static,
                            tcx.mk_array(tcx.types.usize, 3),
                        )
                        /* FIXME use actual fn pointers
                        Warning: naively computing the number of entries in the
                        vtable by counting the methods on the trait + methods on
                        all parent traits does not work, because some methods can
                        be not object safe and thus excluded from the vtable.
                        Increase this counter if you tried to implement this but
                        failed to do it without duplicating a lot of code from
                        other places in the compiler: 2
                        tcx.mk_tup(&[
                            tcx.mk_array(tcx.types.usize, 3),
                            tcx.mk_array(Option<fn()>),
                        ])
                        */
                    }
                    _ => bug!("TyLayout::field_type({:?}): not applicable", this)
                }
            }
            // Arrays and slices.
            ty::Array(element, _) |
            ty::Slice(element) => element,
            ty::Str => tcx.types.u8,

            // Tuples, generators and closures.
            ty::Closure(def_id, ref substs) => {
                substs.upvar_tys(def_id, tcx).nth(i).unwrap()
            }

            ty::Generator(def_id, ref substs, _) => {
                substs.field_tys(def_id, tcx).nth(i).unwrap()
            }

            ty::Tuple(tys) => tys[i],

            // SIMD vector types.
            ty::Adt(def, ..) if def.repr.simd() => {
                this.ty.simd_type(tcx)
            }

            // ADT types.
            ty::Adt(def, substs) => {
                match this.variants {
                    Variants::Single { index } => {
                        def.variants[index].fields[i].ty(tcx, substs)
                    }

                    // Discriminant field for enums (where applicable).
                    Variants::Tagged { tag: ref discr, .. } |
                    Variants::NicheFilling { niche: ref discr, .. } => {
                        assert_eq!(i, 0);
                        let layout = LayoutDetails::scalar(cx, discr.clone());
                        return MaybeResult::from_ok(TyLayout {
                            details: tcx.intern_layout(layout),
                            ty: discr.value.to_ty(tcx)
                        });
                    }
                }
            }

            ty::Projection(_) | ty::UnnormalizedProjection(..) | ty::Bound(..) |
            ty::Opaque(..) | ty::Param(_) | ty::Infer(_) | ty::Error => {
                bug!("TyLayout::field_type: unexpected type `{}`", this.ty)
            }
        })
    }
}

struct Niche {
    offset: Size,
    scalar: Scalar,
    available: u128,
}

impl Niche {
    fn reserve<'a, 'tcx>(
        &self,
        cx: &LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>>,
        count: u128,
    ) -> Option<(u128, Scalar)> {
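        // An illustrative example (not from the original source): for
        // `Option<bool>`, the niche scalar is valid in 0..=1, and
        // reserve(cx, 1) returns start = 2 with the valid range widened
        // to 0..=2, so `None` can be encoded as 2.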
        if count > self.available {
            return None;
        }
        let Scalar { value, valid_range: ref v } = self.scalar;
        let bits = value.size(cx).bits();
        assert!(bits <= 128);
        let max_value = !0u128 >> (128 - bits);
        let start = v.end().wrapping_add(1) & max_value;
        let end = v.end().wrapping_add(count) & max_value;
        Some((start, Scalar { value, valid_range: *v.start()..=end }))
    }
}
impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> {
    /// Find the offset of a niche leaf field, starting from
    /// the given type and recursing through aggregates.
    // FIXME(eddyb) traverse already optimized enums.
    fn find_niche(&self, layout: TyLayout<'tcx>) -> Result<Option<Niche>, LayoutError<'tcx>> {
        let scalar_niche = |scalar: &Scalar, offset| {
            let Scalar { value, valid_range: ref v } = *scalar;

            let bits = value.size(self).bits();
            assert!(bits <= 128);
            let max_value = !0u128 >> (128 - bits);

            // Find out how many values are outside the valid range.
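            // An illustrative example (not from the original source): `bool`
            // occupies 8 bits with valid_range 0..=1, leaving 254 niche
            // values (0 + (255 - 1)); a non-null pointer (valid_range
            // 1..=max) leaves exactly one niche value, null.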
            let available = if v.start() <= v.end() {
                v.start() + (max_value - v.end())
            } else {
                v.start() - v.end() - 1
            };

            // Give up if there is no niche value available.
            if available == 0 {
                return None;
            }

            Some(Niche { offset, scalar: scalar.clone(), available })
        };
        // Local variables which live across yields are stored
        // in the generator type as fields. These may be uninitialized
        // so we don't look for niches there.
        if let ty::Generator(..) = layout.ty.sty {
            return Ok(None);
        }

        match layout.abi {
            Abi::Scalar(ref scalar) => {
                return Ok(scalar_niche(scalar, Size::ZERO));
            }
            Abi::ScalarPair(ref a, ref b) => {
                // HACK(nox): We iter on `b` and then `a` because `max_by_key`
                // returns the last maximum.
                let niche = iter::once(
                    (b, a.value.size(self).align_to(b.value.align(self).abi))
                )
                    .chain(iter::once((a, Size::ZERO)))
                    .filter_map(|(scalar, offset)| scalar_niche(scalar, offset))
                    .max_by_key(|niche| niche.available);
                return Ok(niche);
            }
            Abi::Vector { ref element, .. } => {
                return Ok(scalar_niche(element, Size::ZERO));
            }
            _ => {}
        }

        // Perhaps one of the fields is non-zero, let's recurse and find out.
        if let FieldPlacement::Union(_) = layout.fields {
            // Only Rust enums have safe-to-inspect fields
            // (a discriminant), other unions are unsafe.
            if let Variants::Single { .. } = layout.variants {
                return Ok(None);
            }
        }
        if let FieldPlacement::Array { .. } = layout.fields {
            if layout.fields.count() > 0 {
                return self.find_niche(layout.field(self, 0)?);
            } else {
                return Ok(None);
            }
        }

        let mut niche = None;
        let mut available = 0;
        for i in 0..layout.fields.count() {
            if let Some(mut c) = self.find_niche(layout.field(self, i)?)? {
                if c.available > available {
                    available = c.available;
                    c.offset += layout.fields.offset(i);
                    niche = Some(c);
                }
            }
        }
        Ok(niche)
    }
}
impl<'a> HashStable<StableHashingContext<'a>> for Variants {
    fn hash_stable<W: StableHasherResult>(&self,
                                          hcx: &mut StableHashingContext<'a>,
                                          hasher: &mut StableHasher<W>) {
        use ty::layout::Variants::*;
        mem::discriminant(self).hash_stable(hcx, hasher);

        match *self {
            Single { index } => {
                index.hash_stable(hcx, hasher);
            }
            Tagged {
                ref tag,
                ref variants,
            } => {
                tag.hash_stable(hcx, hasher);
                variants.hash_stable(hcx, hasher);
            }
            NicheFilling {
                dataful_variant,
                ref niche_variants,
                ref niche,
                niche_start,
                ref variants,
            } => {
                dataful_variant.hash_stable(hcx, hasher);
                niche_variants.start().hash_stable(hcx, hasher);
                niche_variants.end().hash_stable(hcx, hasher);
                niche.hash_stable(hcx, hasher);
                niche_start.hash_stable(hcx, hasher);
                variants.hash_stable(hcx, hasher);
            }
        }
    }
}
impl<'a> HashStable<StableHashingContext<'a>> for FieldPlacement {
    fn hash_stable<W: StableHasherResult>(&self,
                                          hcx: &mut StableHashingContext<'a>,
                                          hasher: &mut StableHasher<W>) {
        use ty::layout::FieldPlacement::*;
        mem::discriminant(self).hash_stable(hcx, hasher);

        match *self {
            Union(count) => {
                count.hash_stable(hcx, hasher);
            }
            Array { count, stride } => {
                count.hash_stable(hcx, hasher);
                stride.hash_stable(hcx, hasher);
            }
            Arbitrary { ref offsets, ref memory_index } => {
                offsets.hash_stable(hcx, hasher);
                memory_index.hash_stable(hcx, hasher);
            }
        }
    }
}
impl<'a> HashStable<StableHashingContext<'a>> for VariantIdx {
    fn hash_stable<W: StableHasherResult>(
        &self,
        hcx: &mut StableHashingContext<'a>,
        hasher: &mut StableHasher<W>,
    ) {
        self.as_u32().hash_stable(hcx, hasher)
    }
}
impl<'a> HashStable<StableHashingContext<'a>> for Abi {
    fn hash_stable<W: StableHasherResult>(&self,
                                          hcx: &mut StableHashingContext<'a>,
                                          hasher: &mut StableHasher<W>) {
        use ty::layout::Abi::*;
        mem::discriminant(self).hash_stable(hcx, hasher);

        match *self {
            Uninhabited => {}
            Scalar(ref value) => {
                value.hash_stable(hcx, hasher);
            }
            ScalarPair(ref a, ref b) => {
                a.hash_stable(hcx, hasher);
                b.hash_stable(hcx, hasher);
            }
            Vector { ref element, count } => {
                element.hash_stable(hcx, hasher);
                count.hash_stable(hcx, hasher);
            }
            Aggregate { sized } => {
                sized.hash_stable(hcx, hasher);
            }
        }
    }
}
impl<'a> HashStable<StableHashingContext<'a>> for Scalar {
    fn hash_stable<W: StableHasherResult>(&self,
                                          hcx: &mut StableHashingContext<'a>,
                                          hasher: &mut StableHasher<W>) {
        let Scalar { value, ref valid_range } = *self;
        value.hash_stable(hcx, hasher);
        valid_range.start().hash_stable(hcx, hasher);
        valid_range.end().hash_stable(hcx, hasher);
    }
}
impl_stable_hash_for!(struct ::ty::layout::LayoutDetails {
    variants,
    fields,
    abi,
    size,
    align
});

impl_stable_hash_for!(enum ::ty::layout::Integer {
    I8,
    I16,
    I32,
    I64,
    I128
});

impl_stable_hash_for!(enum ::ty::layout::Primitive {
    Int(integer, signed),
    Float(fty),
    Pointer
});

impl_stable_hash_for!(struct ::ty::layout::AbiAndPrefAlign {
    abi,
    pref
});
impl<'gcx> HashStable<StableHashingContext<'gcx>> for Align {
    fn hash_stable<W: StableHasherResult>(&self,
                                          hcx: &mut StableHashingContext<'gcx>,
                                          hasher: &mut StableHasher<W>) {
        self.bytes().hash_stable(hcx, hasher);
    }
}

impl<'gcx> HashStable<StableHashingContext<'gcx>> for Size {
    fn hash_stable<W: StableHasherResult>(&self,
                                          hcx: &mut StableHashingContext<'gcx>,
                                          hasher: &mut StableHasher<W>) {
        self.bytes().hash_stable(hcx, hasher);
    }
}
impl<'a, 'gcx> HashStable<StableHashingContext<'a>> for LayoutError<'gcx>
{
    fn hash_stable<W: StableHasherResult>(&self,
                                          hcx: &mut StableHashingContext<'a>,
                                          hasher: &mut StableHasher<W>) {
        use ty::layout::LayoutError::*;
        mem::discriminant(self).hash_stable(hcx, hasher);

        match *self {
            Unknown(t) |
            SizeOverflow(t) => t.hash_stable(hcx, hasher)
        }
    }
}