1 // Copyright 2016 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
11 use session::{self, DataTypeKind};
12 use ty::{self, Ty, TyCtxt, TypeFoldable, ReprOptions};
14 use syntax::ast::{self, IntTy, UintTy};
16 use syntax_pos::DUMMY_SP;
25 use ich::StableHashingContext;
26 use rustc_data_structures::stable_hasher::{HashStable, StableHasher,
29 pub use rustc_target::abi::*;
// Extension methods on the ABI-level `Integer` width type that require a
// `TyCtxt`. NOTE(review): this listing is truncated — the remaining
// parameters and the end of the `repr_discr` signature are not visible here.
31 pub trait IntegerExt {
// Lower this integer width to the corresponding Rust primitive type
// (`u8`..`u128` when `signed` is false, `i8`..`i128` when true).
32 fn to_ty<'a, 'tcx>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, signed: bool) -> Ty<'tcx>;
// Map an `attr::IntType` (as parsed from `#[repr(...)]`) to an `Integer`.
33 fn from_attr<C: HasDataLayout>(cx: C, ity: attr::IntType) -> Integer;
// Pick a discriminant size and signedness for an enum; see the impl below.
34 fn repr_discr<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
// NOTE(review): interior lines of this impl are missing from this listing
// (the embedded line numbering skips); comments below describe only what is
// visible.
42 impl IntegerExt for Integer {
// Exhaustive width × signedness table mapping to the interned primitive types.
43 fn to_ty<'a, 'tcx>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, signed: bool) -> Ty<'tcx> {
44 match (*self, signed) {
45 (I8, false) => tcx.types.u8,
46 (I16, false) => tcx.types.u16,
47 (I32, false) => tcx.types.u32,
48 (I64, false) => tcx.types.u64,
49 (I128, false) => tcx.types.u128,
50 (I8, true) => tcx.types.i8,
51 (I16, true) => tcx.types.i16,
52 (I32, true) => tcx.types.i32,
53 (I64, true) => tcx.types.i64,
54 (I128, true) => tcx.types.i128,
58 /// Get the Integer type from an attr::IntType.
59 fn from_attr<C: HasDataLayout>(cx: C, ity: attr::IntType) -> Integer {
60 let dl = cx.data_layout();
63 attr::SignedInt(IntTy::I8) | attr::UnsignedInt(UintTy::U8) => I8,
64 attr::SignedInt(IntTy::I16) | attr::UnsignedInt(UintTy::U16) => I16,
65 attr::SignedInt(IntTy::I32) | attr::UnsignedInt(UintTy::U32) => I32,
66 attr::SignedInt(IntTy::I64) | attr::UnsignedInt(UintTy::U64) => I64,
67 attr::SignedInt(IntTy::I128) | attr::UnsignedInt(UintTy::U128) => I128,
// `isize`/`usize` take their width from the target's data layout.
68 attr::SignedInt(IntTy::Isize) | attr::UnsignedInt(UintTy::Usize) => {
69 dl.ptr_sized_integer()
74 /// Find the appropriate Integer type and signedness for the given
75 /// signed discriminant range and #[repr] attribute.
76 /// N.B.: u128 values above i128::MAX will be treated as signed, but
77 /// that shouldn't affect anything, other than maybe debuginfo.
78 fn repr_discr<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
84 // Theoretically, negative values could be larger in unsigned representation
85 // than the unsigned representation of the signed minimum. However, if there
86 // are any negative values, the only valid unsigned representation is u128
87 // which can fit all i128 values, so the result remains unaffected.
88 let unsigned_fit = Integer::fit_unsigned(cmp::max(min as u128, max as u128));
89 let signed_fit = cmp::max(Integer::fit_signed(min), Integer::fit_signed(max));
91 let mut min_from_extern = None;
// An explicit `#[repr(int)]` hint wins outright; it is a compiler bug if the
// hinted width cannot hold the computed discriminant range.
94 if let Some(ity) = repr.int {
95 let discr = Integer::from_attr(tcx, ity);
96 let fit = if ity.is_signed() { signed_fit } else { unsigned_fit };
98 bug!("Integer::repr_discr: `#[repr]` hint too small for \
99 discriminant range of enum `{}", ty)
101 return (discr, ity.is_signed());
105 match &tcx.sess.target.target.arch[..] {
106 // WARNING: the ARM EABI has two variants; the one corresponding
107 // to `at_least == I32` appears to be used on Linux and NetBSD,
108 // but some systems may use the variant corresponding to no
109 // lower bound. However, we don't run on those yet...?
110 "arm" => min_from_extern = Some(I32),
// NOTE(review): the wildcard arm chooses the same I32 minimum as "arm";
// the (missing) surrounding condition presumably gates this on repr(C) —
// confirm against the full source.
111 _ => min_from_extern = Some(I32),
115 let at_least = min_from_extern.unwrap_or(min_default);
117 // If there are no negative values, we can use the unsigned fit.
119 (cmp::max(unsigned_fit, at_least), false)
121 (cmp::max(signed_fit, at_least), true)
// Extension trait lowering an ABI `Primitive` to the Rust type it models.
// NOTE(review): the trait's closing brace is not visible in this listing.
126 pub trait PrimitiveExt {
127 fn to_ty<'a, 'tcx>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>) -> Ty<'tcx>;
130 impl PrimitiveExt for Primitive {
// Map each primitive to a concrete Rust type: integers via `IntegerExt::to_ty`,
// floats to f32/f64, and `Pointer` to `*mut ()` as a representative type.
131 fn to_ty<'a, 'tcx>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>) -> Ty<'tcx> {
133 Int(i, signed) => i.to_ty(tcx, signed),
134 Float(FloatTy::F32) => tcx.types.f32,
135 Float(FloatTy::F64) => tcx.types.f64,
136 Pointer => tcx.mk_mut_ptr(tcx.mk_unit()),
141 /// The first half of a fat pointer.
143 /// - For a trait object, this is the address of the box.
144 /// - For a slice, this is the base address.
// Field index of the data pointer within a fat pointer's field placement.
145 pub const FAT_PTR_ADDR: usize = 0;
147 /// The second half of a fat pointer.
149 /// - For a trait object, this is the address of the vtable.
150 /// - For a slice, this is the length.
// Field index of the metadata (vtable pointer or length) within a fat pointer.
151 pub const FAT_PTR_EXTRA: usize = 1;
// Errors that can arise while computing a type's layout.
// NOTE(review): the `Unknown(Ty<'tcx>)` variant used throughout this file is
// on a line missing from this listing.
153 #[derive(Copy, Clone, Debug, RustcEncodable, RustcDecodable)]
154 pub enum LayoutError<'tcx> {
// The type's size would exceed the target's object-size bound.
156 SizeOverflow(Ty<'tcx>)
// Human-readable rendering of layout errors, used in diagnostics.
159 impl<'tcx> fmt::Display for LayoutError<'tcx> {
160 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
162 LayoutError::Unknown(ty) => {
163 write!(f, "the type `{:?}` has an unknown layout", ty)
165 LayoutError::SizeOverflow(ty) => {
166 write!(f, "the type `{:?}` is too big for the current architecture", ty)
// Provider for the `layout_raw` query: computes the layout of `ty` under
// `param_env`, guarding against infinitely recursive types by tracking a
// layout depth in the thread-local `ImplicitCtxt` and aborting once it
// exceeds the session's recursion limit.
172 fn layout_raw<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
173 query: ty::ParamEnvAnd<'tcx, Ty<'tcx>>)
174 -> Result<&'tcx LayoutDetails, LayoutError<'tcx>>
176 ty::tls::with_related_context(tcx, move |icx| {
177 let rec_limit = *tcx.sess.recursion_limit.get();
178 let (param_env, ty) = query.into_parts();
// Depth check fires before any real work, so a cyclic type errors out
// instead of overflowing the stack.
180 if icx.layout_depth > rec_limit {
182 &format!("overflow representing the type `{}`", ty));
185 // Update the ImplicitCtxt to increase the layout_depth
186 let icx = ty::tls::ImplicitCtxt {
187 layout_depth: icx.layout_depth + 1,
// Re-enter the TLS context with the bumped depth before doing the
// actual (uncached) layout computation.
191 ty::tls::enter_context(&icx, |_| {
192 let cx = LayoutCx { tcx, param_env };
193 cx.layout_raw_uncached(ty)
// Registers this module's query providers (at least `layout_raw`, defined
// above) into the global provider table. NOTE(review): the struct-update body
// is truncated in this listing.
198 pub fn provide(providers: &mut ty::query::Providers<'_>) {
199 *providers = ty::query::Providers {
// Context bundling everything needed to compute layouts: a type-context-like
// value `C` plus the parameter environment. NOTE(review): the `tcx: C` field
// is on a line missing from this listing.
205 #[derive(Copy, Clone)]
206 pub struct LayoutCx<'tcx, C> {
208 pub param_env: ty::ParamEnv<'tcx>
// NOTE(review): this listing is heavily truncated (the embedded numbering
// skips many lines). The statement order and closure captures below are
// load-bearing for ABI computation; comments added here only annotate what
// is visible and must not be read as a complete description.
211 impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> {
// Core layout computation for a single (already normalized) type. Returns
// the interned `LayoutDetails` or a `LayoutError` (unknown / size overflow).
212 fn layout_raw_uncached(self, ty: Ty<'tcx>)
213 -> Result<&'tcx LayoutDetails, LayoutError<'tcx>> {
215 let param_env = self.param_env;
216 let dl = self.data_layout();
// Build a `Scalar` whose valid range covers every bit pattern of `value`.
217 let scalar_unit = |value: Primitive| {
218 let bits = value.size(dl).bits();
219 assert!(bits <= 128);
222 valid_range: 0..=(!0 >> (128 - bits))
// Intern a single-scalar layout.
225 let scalar = |value: Primitive| {
226 tcx.intern_layout(LayoutDetails::scalar(self, scalar_unit(value)))
// Lay out two scalars adjacently (with padding for `b`'s alignment) as a
// `ScalarPair` aggregate; used for fat pointers and tagged-enum payloads.
228 let scalar_pair = |a: Scalar, b: Scalar| {
229 let align = a.value.align(dl).max(b.value.align(dl)).max(dl.aggregate_align);
230 let b_offset = a.value.size(dl).abi_align(b.value.align(dl));
231 let size = (b_offset + b.value.size(dl)).abi_align(align);
233 variants: Variants::Single { index: 0 },
234 fields: FieldPlacement::Arbitrary {
235 offsets: vec![Size::ZERO, b_offset],
236 memory_index: vec![0, 1]
238 abi: Abi::ScalarPair(a, b),
// Local classification of how a univariant struct-like layout may be used.
244 #[derive(Copy, Clone, Debug)]
246 /// A tuple, closure, or univariant which cannot be coerced to unsized.
248 /// A univariant, the last field of which may be coerced to unsized.
250 /// A univariant, but with a prefix of an arbitrary size & alignment (e.g. enum tag).
251 Prefixed(Size, Align),
// Workhorse: lay out a list of fields as one struct-like variant, honoring
// `#[repr(packed/align)]` and optionally reordering fields for size.
254 let univariant_uninterned = |fields: &[TyLayout<'_>], repr: &ReprOptions, kind| {
255 let packed = repr.packed();
256 if packed && repr.align > 0 {
257 bug!("struct cannot be packed and aligned");
261 let pack = repr.pack as u64;
262 Align::from_bytes(pack, pack).unwrap()
265 let mut align = if packed {
271 let mut sized = true;
272 let mut offsets = vec![Size::ZERO; fields.len()];
273 let mut inverse_memory_index: Vec<u32> = (0..fields.len() as u32).collect();
// Field reordering is disabled by repr(C)/repr(int)/packed, and for
// prefixed layouts whose prefix alignment exceeds 1.
275 let mut optimize = !repr.inhibit_struct_field_reordering_opt();
276 if let StructKind::Prefixed(_, align) = kind {
277 optimize &= align.abi() == 1;
// A possibly-unsized tail must stay last, so it is excluded from sorting.
281 let end = if let StructKind::MaybeUnsized = kind {
286 let optimizing = &mut inverse_memory_index[..end];
287 let field_align = |f: &TyLayout<'_>| {
288 if packed { f.align.min(pack).abi() } else { f.align.abi() }
291 StructKind::AlwaysSized |
292 StructKind::MaybeUnsized => {
293 optimizing.sort_by_key(|&x| {
294 // Place ZSTs first to avoid "interesting offsets",
295 // especially with only one or two non-ZST fields.
296 let f = &fields[x as usize];
297 (!f.is_zst(), cmp::Reverse(field_align(f)))
300 StructKind::Prefixed(..) => {
301 optimizing.sort_by_key(|&x| field_align(&fields[x as usize]));
306 // inverse_memory_index holds field indices by increasing memory offset.
307 // That is, if field 5 has offset 0, the first element of inverse_memory_index is 5.
308 // We now write field offsets to the corresponding offset slot;
309 // field 5 with offset 0 puts 0 in offsets[5].
310 // At the bottom of this function, we use inverse_memory_index to produce memory_index.
312 let mut offset = Size::ZERO;
// A prefix (e.g. an enum tag) occupies the start of the layout.
314 if let StructKind::Prefixed(prefix_size, prefix_align) = kind {
316 let prefix_align = prefix_align.min(pack);
317 align = align.max(prefix_align);
319 align = align.max(prefix_align);
321 offset = prefix_size.abi_align(prefix_align);
// Assign each field its offset in memory order, accumulating alignment.
324 for &i in &inverse_memory_index {
325 let field = fields[i as usize];
327 bug!("univariant: field #{} of `{}` comes after unsized field",
331 if field.is_unsized() {
335 // Invariant: offset < dl.obj_size_bound() <= 1<<61
337 let field_pack = field.align.min(pack);
338 offset = offset.abi_align(field_pack);
339 align = align.max(field_pack);
342 offset = offset.abi_align(field.align);
343 align = align.max(field.align);
346 debug!("univariant offset: {:?} field: {:#?}", offset, field);
347 offsets[i as usize] = offset;
349 offset = offset.checked_add(field.size, dl)
350 .ok_or(LayoutError::SizeOverflow(ty))?;
// `#[repr(align(N))]` can only raise the overall alignment.
354 let repr_align = repr.align as u64;
355 align = align.max(Align::from_bytes(repr_align, repr_align).unwrap());
356 debug!("univariant repr_align: {:?}", repr_align);
359 debug!("univariant min_size: {:?}", offset);
360 let min_size = offset;
362 // As stated above, inverse_memory_index holds field indices by increasing offset.
363 // This makes it an already-sorted view of the offsets vec.
364 // To invert it, consider:
365 // If field 5 has offset 0, offsets[0] is 5, and memory_index[5] should be 0.
366 // Field 5 would be the first element, so memory_index is i:
367 // Note: if we didn't optimize, it's already right.
369 let mut memory_index;
371 memory_index = vec![0; inverse_memory_index.len()];
373 for i in 0..inverse_memory_index.len() {
374 memory_index[inverse_memory_index[i] as usize] = i as u32;
377 memory_index = inverse_memory_index;
380 let size = min_size.abi_align(align);
381 let mut abi = Abi::Aggregate { sized };
383 // Unpack newtype ABIs and find scalar pairs.
384 if sized && size.bytes() > 0 {
385 // All other fields must be ZSTs, and we need them to all start at 0.
386 let mut zst_offsets =
387 offsets.iter().enumerate().filter(|&(i, _)| fields[i].is_zst());
388 if zst_offsets.all(|(_, o)| o.bytes() == 0) {
389 let mut non_zst_fields =
390 fields.iter().enumerate().filter(|&(_, f)| !f.is_zst());
392 match (non_zst_fields.next(), non_zst_fields.next(), non_zst_fields.next()) {
393 // We have exactly one non-ZST field.
394 (Some((i, field)), None, None) => {
395 // Field fills the struct and it has a scalar or scalar pair ABI.
396 if offsets[i].bytes() == 0 &&
397 align.abi() == field.align.abi() &&
400 // For plain scalars, or vectors of them, we can't unpack
401 // newtypes for `#[repr(C)]`, as that affects C ABIs.
402 Abi::Scalar(_) | Abi::Vector { .. } if optimize => {
403 abi = field.abi.clone();
405 // But scalar pairs are Rust-specific and get
406 // treated as aggregates by C ABIs anyway.
407 Abi::ScalarPair(..) => {
408 abi = field.abi.clone();
415 // Two non-ZST fields, and they're both scalars.
416 (Some((i, &TyLayout {
417 details: &LayoutDetails { abi: Abi::Scalar(ref a), .. }, ..
418 })), Some((j, &TyLayout {
419 details: &LayoutDetails { abi: Abi::Scalar(ref b), .. }, ..
421 // Order by the memory placement, not source order.
422 let ((i, a), (j, b)) = if offsets[i] < offsets[j] {
427 let pair = scalar_pair(a.clone(), b.clone());
428 let pair_offsets = match pair.fields {
429 FieldPlacement::Arbitrary {
433 assert_eq!(memory_index, &[0, 1]);
438 if offsets[i] == pair_offsets[0] &&
439 offsets[j] == pair_offsets[1] &&
440 align == pair.align &&
442 // We can use `ScalarPair` only when it matches our
443 // already computed layout (including `#[repr(C)]`).
// Any uninhabited field makes the whole aggregate uninhabited.
453 if sized && fields.iter().any(|f| f.abi.is_uninhabited()) {
454 abi = Abi::Uninhabited;
458 variants: Variants::Single { index: 0 },
459 fields: FieldPlacement::Arbitrary {
// Interned convenience wrapper around `univariant_uninterned`.
468 let univariant = |fields: &[TyLayout<'_>], repr: &ReprOptions, kind| {
469 Ok(tcx.intern_layout(univariant_uninterned(fields, repr, kind)?))
471 debug_assert!(!ty.has_infer_types());
// --- Per-type dispatch (match arms; several arm headers are missing from
// this listing). Basic scalars first: bool (valid range presumably 0..=1 —
// the range line is not visible), then char with its Unicode scalar range.
476 tcx.intern_layout(LayoutDetails::scalar(self, Scalar {
477 value: Int(I8, false),
482 tcx.intern_layout(LayoutDetails::scalar(self, Scalar {
483 value: Int(I32, false),
484 valid_range: 0..=0x10FFFF
488 scalar(Int(Integer::from_attr(dl, attr::SignedInt(ity)), true))
491 scalar(Int(Integer::from_attr(dl, attr::UnsignedInt(ity)), false))
493 ty::Float(fty) => scalar(Float(fty)),
// Function pointers: a pointer scalar whose valid range excludes null.
495 let mut ptr = scalar_unit(Pointer);
496 ptr.valid_range = 1..=*ptr.valid_range.end();
497 tcx.intern_layout(LayoutDetails::scalar(self, ptr))
// The never type: an uninhabited, zero-field layout.
502 tcx.intern_layout(LayoutDetails {
503 variants: Variants::Single { index: 0 },
504 fields: FieldPlacement::Union(0),
505 abi: Abi::Uninhabited,
511 // Potentially-fat pointers.
512 ty::Ref(_, pointee, _) |
513 ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
514 let mut data_ptr = scalar_unit(Pointer);
515 if !ty.is_unsafe_ptr() {
516 data_ptr.valid_range = 1..=*data_ptr.valid_range.end();
519 let pointee = tcx.normalize_erasing_regions(param_env, pointee);
520 if pointee.is_sized(tcx.at(DUMMY_SP), param_env) {
521 return Ok(tcx.intern_layout(LayoutDetails::scalar(self, data_ptr)));
// Unsized pointee: pick metadata by the tail type (slice length,
// trait-object vtable), or fail for foreign/unknown tails.
524 let unsized_part = tcx.struct_tail(pointee);
525 let metadata = match unsized_part.sty {
527 return Ok(tcx.intern_layout(LayoutDetails::scalar(self, data_ptr)));
529 ty::Slice(_) | ty::Str => {
530 scalar_unit(Int(dl.ptr_sized_integer(), false))
533 let mut vtable = scalar_unit(Pointer);
534 vtable.valid_range = 1..=*vtable.valid_range.end();
537 _ => return Err(LayoutError::Unknown(unsized_part))
540 // Effectively a (ptr, meta) tuple.
541 tcx.intern_layout(scalar_pair(data_ptr, metadata))
544 // Arrays and slices.
545 ty::Array(element, mut count) => {
546 if count.has_projections() {
547 count = tcx.normalize_erasing_regions(param_env, count);
548 if count.has_projections() {
549 return Err(LayoutError::Unknown(ty));
553 let element = self.layout_of(element)?;
554 let count = count.unwrap_usize(tcx);
555 let size = element.size.checked_mul(count, dl)
556 .ok_or(LayoutError::SizeOverflow(ty))?;
558 tcx.intern_layout(LayoutDetails {
559 variants: Variants::Single { index: 0 },
560 fields: FieldPlacement::Array {
561 stride: element.size,
564 abi: Abi::Aggregate { sized: true },
565 align: element.align,
569 ty::Slice(element) => {
570 let element = self.layout_of(element)?;
571 tcx.intern_layout(LayoutDetails {
572 variants: Variants::Single { index: 0 },
573 fields: FieldPlacement::Array {
574 stride: element.size,
577 abi: Abi::Aggregate { sized: false },
578 align: element.align,
// `str`: an unsized array of bytes (stride 1).
583 tcx.intern_layout(LayoutDetails {
584 variants: Variants::Single { index: 0 },
585 fields: FieldPlacement::Array {
586 stride: Size::from_bytes(1),
589 abi: Abi::Aggregate { sized: false },
// Unit-like case: empty univariant layout.
597 univariant(&[], &ReprOptions::default(), StructKind::AlwaysSized)?
599 ty::Dynamic(..) | ty::Foreign(..) => {
600 let mut unit = univariant_uninterned(&[], &ReprOptions::default(),
601 StructKind::AlwaysSized)?;
// Trait objects / foreign types are extern-unsized: flip `sized` off.
603 Abi::Aggregate { ref mut sized } => *sized = false,
606 tcx.intern_layout(unit)
609 // Tuples, generators and closures.
610 ty::Generator(def_id, ref substs, _) => {
611 let tys = substs.field_tys(def_id, tcx);
612 univariant(&tys.map(|ty| self.layout_of(ty)).collect::<Result<Vec<_>, _>>()?,
613 &ReprOptions::default(),
614 StructKind::AlwaysSized)?
617 ty::Closure(def_id, ref substs) => {
618 let tys = substs.upvar_tys(def_id, tcx);
619 univariant(&tys.map(|ty| self.layout_of(ty)).collect::<Result<Vec<_>, _>>()?,
620 &ReprOptions::default(),
621 StructKind::AlwaysSized)?
// Tuples: only the last field may be unsized.
625 let kind = if tys.len() == 0 {
626 StructKind::AlwaysSized
628 StructKind::MaybeUnsized
631 univariant(&tys.iter().map(|ty| self.layout_of(ty)).collect::<Result<Vec<_>, _>>()?,
632 &ReprOptions::default(), kind)?
635 // SIMD vector types.
636 ty::Adt(def, ..) if def.repr.simd() => {
637 let element = self.layout_of(ty.simd_type(tcx))?;
638 let count = ty.simd_size(tcx) as u64;
640 let scalar = match element.abi {
641 Abi::Scalar(ref scalar) => scalar.clone(),
643 tcx.sess.fatal(&format!("monomorphising SIMD type `{}` with \
644 a non-machine element type `{}`",
648 let size = element.size.checked_mul(count, dl)
649 .ok_or(LayoutError::SizeOverflow(ty))?;
650 let align = dl.vector_align(size);
651 let size = size.abi_align(align);
653 tcx.intern_layout(LayoutDetails {
654 variants: Variants::Single { index: 0 },
655 fields: FieldPlacement::Array {
656 stride: element.size,
// ADTs (structs, enums, unions) — the most involved case.
669 ty::Adt(def, substs) => {
670 // Cache the field layouts.
671 let variants = def.variants.iter().map(|v| {
672 v.fields.iter().map(|field| {
673 self.layout_of(field.ty(tcx, substs))
674 }).collect::<Result<Vec<_>, _>>()
675 }).collect::<Result<Vec<_>, _>>()?;
// Unions: size is the max field size; fields all live at offset 0.
678 let packed = def.repr.packed();
679 if packed && def.repr.align > 0 {
680 bug!("Union cannot be packed and aligned");
684 let pack = def.repr.pack as u64;
685 Align::from_bytes(pack, pack).unwrap()
688 let mut align = if packed {
694 if def.repr.align > 0 {
695 let repr_align = def.repr.align as u64;
697 Align::from_bytes(repr_align, repr_align).unwrap());
700 let mut size = Size::ZERO;
701 for field in &variants[0] {
702 assert!(!field.is_unsized());
705 let field_pack = field.align.min(pack);
706 align = align.max(field_pack);
708 align = align.max(field.align);
710 size = cmp::max(size, field.size);
713 return Ok(tcx.intern_layout(LayoutDetails {
714 variants: Variants::Single { index: 0 },
715 fields: FieldPlacement::Union(variants[0].len()),
716 abi: Abi::Aggregate { sized: true },
718 size: size.abi_align(align)
722 // A variant is absent if it's uninhabited and only has ZST fields.
723 // Present uninhabited variants only require space for their fields,
724 // but *not* an encoding of the discriminant (e.g. a tag value).
725 // See issue #49298 for more details on the need to leave space
726 // for non-ZST uninhabited data (mostly partial initialization).
727 let absent = |fields: &[TyLayout<'_>]| {
728 let uninhabited = fields.iter().any(|f| f.abi.is_uninhabited());
729 let is_zst = fields.iter().all(|f| f.is_zst());
730 uninhabited && is_zst
732 let (present_first, present_second) = {
733 let mut present_variants = (0..variants.len()).filter(|&v| {
734 !absent(&variants[v])
736 (present_variants.next(), present_variants.next())
738 if present_first.is_none() {
739 // Uninhabited because it has no variants, or only absent ones.
740 return tcx.layout_raw(param_env.and(tcx.types.never));
743 let is_struct = !def.is_enum() ||
744 // Only one variant is present.
745 (present_second.is_none() &&
746 // Representation optimizations are allowed.
747 !def.repr.inhibit_enum_layout_opt());
749 // Struct, or univariant enum equivalent to a struct.
750 // (Typechecking will reject discriminant-sizing attrs.)
752 let v = present_first.unwrap();
753 let kind = if def.is_enum() || variants[v].len() == 0 {
754 StructKind::AlwaysSized
756 let param_env = tcx.param_env(def.did);
757 let last_field = def.variants[v].fields.last().unwrap();
758 let always_sized = tcx.type_of(last_field.did)
759 .is_sized(tcx.at(DUMMY_SP), param_env);
760 if !always_sized { StructKind::MaybeUnsized }
761 else { StructKind::AlwaysSized }
764 let mut st = univariant_uninterned(&variants[v], &def.repr, kind)?;
765 st.variants = Variants::Single { index: v };
// Apply any `#[rustc_layout_scalar_valid_range_*]` restriction
// (e.g. NonNull/NonZero) by narrowing the scalar's valid range.
766 let (start, end) = self.tcx.layout_scalar_valid_range(def.did);
768 Abi::Scalar(ref mut scalar) |
769 Abi::ScalarPair(ref mut scalar, _) => {
770 // the asserts ensure that we are not using the
771 // `#[rustc_layout_scalar_valid_range(n)]`
772 // attribute to widen the range of anything as that would probably
773 // result in UB somewhere
774 if let Bound::Included(start) = start {
775 assert!(*scalar.valid_range.start() <= start);
776 scalar.valid_range = start..=*scalar.valid_range.end();
778 if let Bound::Included(end) = end {
779 assert!(*scalar.valid_range.end() >= end);
780 scalar.valid_range = *scalar.valid_range.start()..=end;
784 start == Bound::Unbounded && end == Bound::Unbounded,
785 "nonscalar layout for layout_scalar_valid_range type {:?}: {:#?}",
790 return Ok(tcx.intern_layout(st));
793 // The current code for niche-filling relies on variant indices
794 // instead of actual discriminants, so dataful enums with
795 // explicit discriminants (RFC #2363) would misbehave.
796 let no_explicit_discriminants = def.variants.iter().enumerate()
797 .all(|(i, v)| v.discr == ty::VariantDiscr::Relative(i));
799 // Niche-filling enum optimization.
800 if !def.repr.inhibit_enum_layout_opt() && no_explicit_discriminants {
801 let mut dataful_variant = None;
802 let mut niche_variants = usize::max_value()..=0;
804 // Find one non-ZST variant.
805 'variants: for (v, fields) in variants.iter().enumerate() {
811 if dataful_variant.is_none() {
812 dataful_variant = Some(v);
// More than one variant with data disables niche-filling.
815 dataful_variant = None;
820 niche_variants = *niche_variants.start().min(&v)..=v;
823 if niche_variants.start() > niche_variants.end() {
824 dataful_variant = None;
827 if let Some(i) = dataful_variant {
828 let count = (niche_variants.end() - niche_variants.start() + 1) as u128;
// Search the dataful variant's fields for a niche large enough
// to encode all the dataless variants.
829 for (field_index, &field) in variants[i].iter().enumerate() {
830 let niche = match self.find_niche(field)? {
831 Some(niche) => niche,
834 let (niche_start, niche_scalar) = match niche.reserve(self, count) {
839 let mut align = dl.aggregate_align;
840 let st = variants.iter().enumerate().map(|(j, v)| {
841 let mut st = univariant_uninterned(v,
842 &def.repr, StructKind::AlwaysSized)?;
843 st.variants = Variants::Single { index: j };
845 align = align.max(st.align);
848 }).collect::<Result<Vec<_>, _>>()?;
850 let offset = st[i].fields.offset(field_index) + niche.offset;
851 let size = st[i].size;
853 let mut abi = match st[i].abi {
854 Abi::Scalar(_) => Abi::Scalar(niche_scalar.clone()),
855 Abi::ScalarPair(ref first, ref second) => {
856 // We need to use scalar_unit to reset the
857 // valid range to the maximal one for that
858 // primitive, because only the niche is
859 // guaranteed to be initialised, not the
861 if offset.bytes() == 0 {
863 niche_scalar.clone(),
864 scalar_unit(second.value),
868 scalar_unit(first.value),
869 niche_scalar.clone(),
873 _ => Abi::Aggregate { sized: true },
876 if st.iter().all(|v| v.abi.is_uninhabited()) {
877 abi = Abi::Uninhabited;
880 return Ok(tcx.intern_layout(LayoutDetails {
881 variants: Variants::NicheFilling {
888 fields: FieldPlacement::Arbitrary {
889 offsets: vec![offset],
890 memory_index: vec![0]
// Tagged-enum path: compute the discriminant range over the
// inhabited variants, sign-extending per the repr's discr type.
900 let (mut min, mut max) = (i128::max_value(), i128::min_value());
901 let discr_type = def.repr.discr_type();
902 let bits = Integer::from_attr(tcx, discr_type).size().bits();
903 for (i, discr) in def.discriminants(tcx).enumerate() {
904 if variants[i].iter().any(|f| f.abi.is_uninhabited()) {
907 let mut x = discr.val as i128;
908 if discr_type.is_signed() {
909 // sign extend the raw representation to be an i128
910 x = (x << (128 - bits)) >> (128 - bits);
912 if x < min { min = x; }
913 if x > max { max = x; }
915 // We might have no inhabited variants, so pretend there's at least one.
916 if (min, max) == (i128::max_value(), i128::min_value()) {
920 assert!(min <= max, "discriminant range is {}...{}", min, max);
921 let (min_ity, signed) = Integer::repr_discr(tcx, ty, &def.repr, min, max);
923 let mut align = dl.aggregate_align;
924 let mut size = Size::ZERO;
926 // We're interested in the smallest alignment, so start large.
927 let mut start_align = Align::from_bytes(256, 256).unwrap();
928 assert_eq!(Integer::for_abi_align(dl, start_align), None);
930 // repr(C) on an enum tells us to make a (tag, union) layout,
931 // so we need to grow the prefix alignment to be at least
932 // the alignment of the union. (This value is used both for
933 // determining the alignment of the overall enum, and the
934 // determining the alignment of the payload after the tag.)
935 let mut prefix_align = min_ity.align(dl);
937 for fields in &variants {
938 for field in fields {
939 prefix_align = prefix_align.max(field.align);
944 // Create the set of structs that represent each variant.
945 let mut layout_variants = variants.iter().enumerate().map(|(i, field_layouts)| {
946 let mut st = univariant_uninterned(&field_layouts,
947 &def.repr, StructKind::Prefixed(min_ity.size(), prefix_align))?;
948 st.variants = Variants::Single { index: i };
949 // Find the first field we can't move later
950 // to make room for a larger discriminant.
951 for field in st.fields.index_by_increasing_offset().map(|j| field_layouts[j]) {
952 if !field.is_zst() || field.align.abi() != 1 {
953 start_align = start_align.min(field.align);
957 size = cmp::max(size, st.size);
958 align = align.max(st.align);
960 }).collect::<Result<Vec<_>, _>>()?;
962 // Align the maximum variant size to the largest alignment.
963 size = size.abi_align(align);
965 if size.bytes() >= dl.obj_size_bound() {
966 return Err(LayoutError::SizeOverflow(ty));
969 let typeck_ity = Integer::from_attr(dl, def.repr.discr_type());
970 if typeck_ity < min_ity {
971 // It is a bug if Layout decided on a greater discriminant size than typeck for
972 // some reason at this point (based on values discriminant can take on). Mostly
973 // because this discriminant will be loaded, and then stored into variable of
974 // type calculated by typeck. Consider such case (a bug): typeck decided on
975 // byte-sized discriminant, but layout thinks we need a 16-bit to store all
976 // discriminant values. That would be a bug, because then, in codegen, in order
977 // to store this 16-bit discriminant into 8-bit sized temporary some of the
978 // space necessary to represent would have to be discarded (or layout is wrong
979 // on thinking it needs 16 bits)
980 bug!("layout decided on a larger discriminant type ({:?}) than typeck ({:?})",
981 min_ity, typeck_ity);
982 // However, it is fine to make discr type however large (as an optimisation)
983 // after this point – we’ll just truncate the value we load in codegen.
986 // Check to see if we should use a different type for the
987 // discriminant. We can safely use a type with the same size
988 // as the alignment of the first field of each variant.
989 // We increase the size of the discriminant to avoid LLVM copying
990 // padding when it doesn't need to. This normally causes unaligned
991 // load/stores and excessive memcpy/memset operations. By using a
992 // bigger integer size, LLVM can be sure about its contents and
993 // won't be so conservative.
995 // Use the initial field alignment
996 let mut ity = if def.repr.c() || def.repr.int.is_some() {
999 Integer::for_abi_align(dl, start_align).unwrap_or(min_ity)
1002 // If the alignment is not larger than the chosen discriminant size,
1003 // don't use the alignment as the final size.
1007 // Patch up the variants' first few fields.
1008 let old_ity_size = min_ity.size();
1009 let new_ity_size = ity.size();
1010 for variant in &mut layout_variants {
1011 match variant.fields {
1012 FieldPlacement::Arbitrary { ref mut offsets, .. } => {
1014 if *i <= old_ity_size {
1015 assert_eq!(*i, old_ity_size);
1019 // We might be making the struct larger.
1020 if variant.size <= old_ity_size {
1021 variant.size = new_ity_size;
// Build the tag scalar with a valid range masked to the tag width.
1029 let tag_mask = !0u128 >> (128 - ity.size().bits());
1031 value: Int(ity, signed),
1032 valid_range: (min as u128 & tag_mask)..=(max as u128 & tag_mask),
1034 let mut abi = Abi::Aggregate { sized: true };
1035 if tag.value.size(dl) == size {
1036 abi = Abi::Scalar(tag.clone());
1038 // Try to use a ScalarPair for all tagged enums.
1039 let mut common_prim = None;
1040 for (field_layouts, layout_variant) in variants.iter().zip(&layout_variants) {
1041 let offsets = match layout_variant.fields {
1042 FieldPlacement::Arbitrary { ref offsets, .. } => offsets,
1045 let mut fields = field_layouts
1048 .filter(|p| !p.0.is_zst());
1049 let (field, offset) = match (fields.next(), fields.next()) {
1050 (None, None) => continue,
1051 (Some(pair), None) => pair,
1057 let prim = match field.details.abi {
1058 Abi::Scalar(ref scalar) => scalar.value,
1064 if let Some(pair) = common_prim {
1065 // This is pretty conservative. We could go fancier
1066 // by conflating things like i32 and u32, or even
1067 // realising that (u8, u8) could just cohabit with
1069 if pair != (prim, offset) {
1074 common_prim = Some((prim, offset));
1077 if let Some((prim, offset)) = common_prim {
1078 let pair = scalar_pair(tag.clone(), scalar_unit(prim));
1079 let pair_offsets = match pair.fields {
1080 FieldPlacement::Arbitrary {
1084 assert_eq!(memory_index, &[0, 1]);
1089 if pair_offsets[0] == Size::ZERO &&
1090 pair_offsets[1] == *offset &&
1091 align == pair.align &&
1093 // We can use `ScalarPair` only when it matches our
1094 // already computed layout (including `#[repr(C)]`).
1100 if layout_variants.iter().all(|v| v.abi.is_uninhabited()) {
1101 abi = Abi::Uninhabited;
1104 tcx.intern_layout(LayoutDetails {
1105 variants: Variants::Tagged {
1107 variants: layout_variants,
1109 fields: FieldPlacement::Arbitrary {
1110 offsets: vec![Size::ZERO],
1111 memory_index: vec![0]
1119 // Types with no meaningful known layout.
1120 ty::Projection(_) | ty::Opaque(..) => {
1121 let normalized = tcx.normalize_erasing_regions(param_env, ty);
1122 if ty == normalized {
1123 return Err(LayoutError::Unknown(ty));
1125 tcx.layout_raw(param_env.and(normalized))?
// Types that must never reach layout computation.
1129 ty::UnnormalizedProjection(..) |
1130 ty::GeneratorWitness(..) |
1132 bug!("LayoutDetails::compute: unexpected type `{}`", ty)
1135 ty::Param(_) | ty::Error => {
1136 return Err(LayoutError::Unknown(ty));
1141 /// This is invoked by the `layout_raw` query to record the final
1142 /// layout of each type.
// Cheap inline filter; the actual recording is outlined below so that the
// common (disabled) path stays small.
1144 fn record_layout_for_printing(self, layout: TyLayout<'tcx>) {
1145 // If we are running with `-Zprint-type-sizes`, record layouts for
1146 // dumping later. Ignore layouts that are done with non-empty
1147 // environments or non-monomorphic layouts, as the user only wants
1148 // to see the stuff resulting from the final codegen session.
1150 !self.tcx.sess.opts.debugging_opts.print_type_sizes ||
1151 layout.ty.has_param_types() ||
1152 layout.ty.has_self_ty() ||
1153 !self.param_env.caller_bounds.is_empty()
1158 self.record_layout_for_printing_outlined(layout)
// Collects per-variant, per-field size/offset/alignment info for ADTs and
// closures into the session's `code_stats`, for `-Zprint-type-sizes` output.
// NOTE(review): several lines of this method are missing from this listing.
1161 fn record_layout_for_printing_outlined(self, layout: TyLayout<'tcx>) {
1162 // (delay format until we actually need it)
1163 let record = |kind, packed, opt_discr_size, variants| {
1164 let type_desc = format!("{:?}", layout.ty);
1165 self.tcx.sess.code_stats.borrow_mut().record_type_size(kind,
// Only nominal ADTs carry full variant info; closures are recorded
// without fields, everything else is skipped.
1174 let adt_def = match layout.ty.sty {
1175 ty::Adt(ref adt_def, _) => {
1176 debug!("print-type-size t: `{:?}` process adt", layout.ty);
1180 ty::Closure(..) => {
1181 debug!("print-type-size t: `{:?}` record closure", layout.ty);
1182 record(DataTypeKind::Closure, false, None, vec![]);
1187 debug!("print-type-size t: `{:?}` skip non-nominal", layout.ty);
1192 let adt_kind = adt_def.adt_kind();
1193 let adt_packed = adt_def.repr.packed();
// Build a `VariantInfo` for one variant, walking its fields in order
// and tracking the furthest field end as the variant's minimum size.
1195 let build_variant_info = |n: Option<ast::Name>,
1197 layout: TyLayout<'tcx>| {
1198 let mut min_size = Size::ZERO;
1199 let field_info: Vec<_> = flds.iter().enumerate().map(|(i, &name)| {
1200 match layout.field(self, i) {
1202 bug!("no layout found for field {}: `{:?}`", name, err);
1204 Ok(field_layout) => {
1205 let offset = layout.fields.offset(i);
1206 let field_end = offset + field_layout.size;
1207 if min_size < field_end {
1208 min_size = field_end;
1210 session::FieldInfo {
1211 name: name.to_string(),
1212 offset: offset.bytes(),
1213 size: field_layout.size.bytes(),
1214 align: field_layout.align.abi(),
1220 session::VariantInfo {
1221 name: n.map(|n|n.to_string()),
1222 kind: if layout.is_unsized() {
1223 session::SizeKind::Min
1225 session::SizeKind::Exact
1227 align: layout.align.abi(),
1228 size: if min_size.bytes() == 0 {
// Single-variant layouts record that one variant; multi-variant
// (tagged or niche-filled) layouts record every variant plus the
// tag size when present.
1237 match layout.variants {
1238 Variants::Single { index } => {
1239 debug!("print-type-size `{:#?}` variant {}",
1240 layout, adt_def.variants[index].name);
1241 if !adt_def.variants.is_empty() {
1242 let variant_def = &adt_def.variants[index];
1243 let fields: Vec<_> =
1244 variant_def.fields.iter().map(|f| f.ident.name).collect();
1245 record(adt_kind.into(),
1248 vec![build_variant_info(Some(variant_def.name),
1252 // (This case arises for *empty* enums; so give it
1254 record(adt_kind.into(), adt_packed, None, vec![]);
1258 Variants::NicheFilling { .. } |
1259 Variants::Tagged { .. } => {
1260 debug!("print-type-size `{:#?}` adt general variants def {}",
1261 layout.ty, adt_def.variants.len());
1262 let variant_infos: Vec<_> =
1263 adt_def.variants.iter().enumerate().map(|(i, variant_def)| {
1264 let fields: Vec<_> =
1265 variant_def.fields.iter().map(|f| f.ident.name).collect();
1266 build_variant_info(Some(variant_def.name),
1268 layout.for_variant(self, i))
1271 record(adt_kind.into(), adt_packed, match layout.variants {
1272 Variants::Tagged { ref tag, .. } => Some(tag.value.size(self)),
1280 /// Type size "skeleton", i.e. the only information determining a type's size.
1281 /// While this is conservative, (aside from constant sizes, only pointers,
1282 /// newtypes thereof and null pointer optimized enums are allowed), it is
1283 /// enough to statically check common usecases of transmute.
1284 #[derive(Copy, Clone, Debug)]
1285 pub enum SizeSkeleton<'tcx> {
1286 /// Any statically computable Layout.
// NOTE(review): the `Known(Size)` payload line (1287) and the `Pointer`
// variant header (1290) are missing from this sampled listing; only the
// variant doc comments survive below.
1289 /// A potentially-fat pointer.
1291 /// If true, this pointer is never null.
1293 /// The type which determines the unsized metadata, if any,
1294 /// of this pointer. Either a type parameter or a projection
1295 /// depending on one, with regions erased.
// NOTE(review): sampled listing — closure bodies, match arms and closing
// braces are missing throughout this impl; comments below describe only
// what the visible lines establish.
1300 impl<'a, 'tcx> SizeSkeleton<'tcx> {
// Computes a size skeleton for `ty`, succeeding where a full layout may
// fail (e.g. when the size depends only on a pointer being thin/fat).
1301 pub fn compute(ty: Ty<'tcx>,
1302 tcx: TyCtxt<'a, 'tcx, 'tcx>,
1303 param_env: ty::ParamEnv<'tcx>)
1304 -> Result<SizeSkeleton<'tcx>, LayoutError<'tcx>> {
1305 debug_assert!(!ty.has_infer_types());
1307 // First try computing a static layout.
1308 let err = match tcx.layout_of(param_env.and(ty)) {
// Full layout succeeded: the exact size is known.
1310 return Ok(SizeSkeleton::Known(layout.size));
// References and raw pointers: fat-ness is determined by the struct tail.
1316 ty::Ref(_, pointee, _) |
1317 ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
// References are non-null; only `*const`/`*mut` may be null.
1318 let non_zero = !ty.is_unsafe_ptr();
1319 let tail = tcx.struct_tail(pointee);
1321 ty::Param(_) | ty::Projection(_) => {
1322 debug_assert!(tail.has_param_types() || tail.has_self_ty());
1323 Ok(SizeSkeleton::Pointer {
1325 tail: tcx.erase_regions(&tail)
// If layout errored but the tail is concrete, something is wrong
// in the compiler itself.
1329 bug!("SizeSkeleton::compute({}): layout errored ({}), yet \
1330 tail `{}` is not a type parameter or a projection",
// ADTs: only newtypes and two-variant nullable-pointer enums qualify.
1336 ty::Adt(def, substs) => {
1337 // Only newtypes and enums w/ nullable pointer optimization.
1338 if def.is_union() || def.variants.is_empty() || def.variants.len() > 2 {
1342 // Get a zero-sized variant or a pointer newtype.
1343 let zero_or_ptr_variant = |i: usize| {
1344 let fields = def.variants[i].fields.iter().map(|field| {
1345 SizeSkeleton::compute(field.ty(tcx, substs), tcx, param_env)
1348 for field in fields {
1351 SizeSkeleton::Known(size) => {
1352 if size.bytes() > 0 {
1356 SizeSkeleton::Pointer {..} => {
1367 let v0 = zero_or_ptr_variant(0)?;
// Newtype case: propagate the pointer skeleton, upgrading `non_zero`
// if the type's scalar valid range excludes zero.
1369 if def.variants.len() == 1 {
1370 if let Some(SizeSkeleton::Pointer { non_zero, tail }) = v0 {
1371 return Ok(SizeSkeleton::Pointer {
1372 non_zero: non_zero || match tcx.layout_scalar_valid_range(def.did) {
1373 (Bound::Included(start), Bound::Unbounded) => start > 0,
1374 (Bound::Included(start), Bound::Included(end)) =>
1375 0 < start && start < end,
1385 let v1 = zero_or_ptr_variant(1)?;
1386 // Nullable pointer enum optimization.
// Exactly one variant is a never-null pointer, the other is zero-sized:
// the enum is pointer-sized but may be null overall.
1388 (Some(SizeSkeleton::Pointer { non_zero: true, tail }), None) |
1389 (None, Some(SizeSkeleton::Pointer { non_zero: true, tail })) => {
1390 Ok(SizeSkeleton::Pointer {
// Projections/opaque types: normalize once and retry, guarding against
// non-progress to avoid infinite recursion.
1399 ty::Projection(_) | ty::Opaque(..) => {
1400 let normalized = tcx.normalize_erasing_regions(param_env, ty);
1401 if ty == normalized {
1404 SizeSkeleton::compute(normalized, tcx, param_env)
// Two skeletons guarantee equal sizes if both are Known-and-equal, or
// both are pointers (tail equality implies same thin/fat shape).
1412 pub fn same_size(self, other: SizeSkeleton<'_>) -> bool {
1413 match (self, other) {
1414 (SizeSkeleton::Known(a), SizeSkeleton::Known(b)) => a == b,
1415 (SizeSkeleton::Pointer { tail: a, .. },
1416 SizeSkeleton::Pointer { tail: b, .. }) => a == b,
// Abstracts "has access to a TyCtxt" for layout code that is generic over
// its context (plain TyCtxt vs. TyCtxtAt, possibly wrapped in LayoutCx).
1422 pub trait HasTyCtxt<'tcx>: HasDataLayout {
1423 fn tcx<'a>(&'a self) -> TyCtxt<'a, 'tcx, 'tcx>;
// TyCtxt trivially provides the target data layout.
// NOTE(review): the method body (original line ~1428) is missing here.
1426 impl<'a, 'gcx, 'tcx> HasDataLayout for TyCtxt<'a, 'gcx, 'tcx> {
1427 fn data_layout(&self) -> &TargetDataLayout {
// A TyCtxt is its own tcx (re-borrowed in the global lifetime).
// NOTE(review): the method body (original line ~1434) is missing here.
1432 impl<'a, 'gcx, 'tcx> HasTyCtxt<'gcx> for TyCtxt<'a, 'gcx, 'tcx> {
1433 fn tcx<'b>(&'b self) -> TyCtxt<'b, 'gcx, 'gcx> {
// LayoutCx forwards data-layout access to its wrapped context.
1438 impl<'tcx, T: HasDataLayout> HasDataLayout for LayoutCx<'tcx, T> {
1439 fn data_layout(&self) -> &TargetDataLayout {
1440 self.tcx.data_layout()
// LayoutCx likewise forwards tcx access to its wrapped context.
// NOTE(review): the method body (original line ~1446) is missing here.
1444 impl<'gcx, 'tcx, T: HasTyCtxt<'gcx>> HasTyCtxt<'gcx> for LayoutCx<'tcx, T> {
1445 fn tcx<'b>(&'b self) -> TyCtxt<'b, 'gcx, 'gcx> {
// Unifies `T` and `Result<T, E>` return types so layout helpers can be
// written once and work for both infallible and fallible contexts.
1450 pub trait MaybeResult<T> {
1451 fn from_ok(x: T) -> Self;
1452 fn map_same<F: FnOnce(T) -> T>(self, f: F) -> Self;
// Identity instance: a bare `T` is always "ok".
// NOTE(review): the bodies (original lines 1457, 1460) are missing here.
1455 impl<T> MaybeResult<T> for T {
1456 fn from_ok(x: T) -> Self {
1459 fn map_same<F: FnOnce(T) -> T>(self, f: F) -> Self {
// Result instance: `from_ok` wraps in `Ok`, `map_same` maps the Ok value.
// NOTE(review): the bodies (original lines 1466, 1469) are missing here.
1464 impl<T, E> MaybeResult<T> for Result<T, E> {
1465 fn from_ok(x: T) -> Self {
1468 fn map_same<F: FnOnce(T) -> T>(self, f: F) -> Self {
// Alias instantiating rustc_target's generic TyLayout with rustc's Ty.
1473 pub type TyLayout<'tcx> = ::rustc_target::abi::TyLayout<'tcx, Ty<'tcx>>;
1475 impl<'a, 'tcx> LayoutOf for LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> {
// NOTE(review): the `type Ty = Ty<'tcx>;` line (original 1476) is missing
// from this sampled listing.
1477 type TyLayout = Result<TyLayout<'tcx>, LayoutError<'tcx>>;
1479 /// Computes the layout of a type. Note that this implicitly
1480 /// executes in "reveal all" mode.
1481 fn layout_of(self, ty: Ty<'tcx>) -> Self::TyLayout {
// Reveal all + erase regions before querying, so the cache key is canonical.
1482 let param_env = self.param_env.with_reveal_all();
1483 let ty = self.tcx.normalize_erasing_regions(param_env, ty);
1484 let details = self.tcx.layout_raw(param_env.and(ty))?;
1485 let layout = TyLayout {
1490 // NB: This recording is normally disabled; when enabled, it
1491 // can however trigger recursive invocations of `layout_of`.
1492 // Therefore, we execute it *after* the main query has
1493 // completed, to avoid problems around recursive structures
1494 // and the like. (Admittedly, I wasn't able to reproduce a problem
1495 // here, but it seems like the right thing to do. -nmatsakis)
1496 self.record_layout_for_printing(layout);
// Same as the impl above, but for a span-carrying TyCtxtAt context; the
// recording step rebuilds a plain-TyCtxt LayoutCx before delegating.
1502 impl<'a, 'tcx> LayoutOf for LayoutCx<'tcx, ty::query::TyCtxtAt<'a, 'tcx, 'tcx>> {
1504 type TyLayout = Result<TyLayout<'tcx>, LayoutError<'tcx>>;
1506 /// Computes the layout of a type. Note that this implicitly
1507 /// executes in "reveal all" mode.
1508 fn layout_of(self, ty: Ty<'tcx>) -> Self::TyLayout {
1509 let param_env = self.param_env.with_reveal_all();
1510 let ty = self.tcx.normalize_erasing_regions(param_env, ty);
1511 let details = self.tcx.layout_raw(param_env.and(ty))?;
1512 let layout = TyLayout {
1517 // NB: This recording is normally disabled; when enabled, it
1518 // can however trigger recursive invocations of `layout_of`.
1519 // Therefore, we execute it *after* the main query has
1520 // completed, to avoid problems around recursive structures
1521 // and the like. (Admittedly, I wasn't able to reproduce a problem
1522 // here, but it seems like the right thing to do. -nmatsakis)
1525 param_env: self.param_env
1527 cx.record_layout_for_printing(layout);
1533 // Helper (inherent) `layout_of` methods to avoid pushing `LayoutCx` to users.
1534 impl TyCtxt<'a, 'tcx, '_> {
1535 /// Computes the layout of a type. Note that this implicitly
1536 /// executes in "reveal all" mode.
1538 pub fn layout_of(self, param_env_and_ty: ty::ParamEnvAnd<'tcx, Ty<'tcx>>)
1539 -> Result<TyLayout<'tcx>, LayoutError<'tcx>> {
// Build a throwaway LayoutCx over the global tcx and delegate to the
// trait impl above. (The `let cx = LayoutCx {` header line is missing
// from this sampled listing.)
1541 tcx: self.global_tcx(),
1542 param_env: param_env_and_ty.param_env
1544 cx.layout_of(param_env_and_ty.value)
// Span-aware counterpart of the helper above: preserves `self.span` by
// constructing the inner context via `.at(self.span)`.
1548 impl ty::query::TyCtxtAt<'a, 'tcx, '_> {
1549 /// Computes the layout of a type. Note that this implicitly
1550 /// executes in "reveal all" mode.
1552 pub fn layout_of(self, param_env_and_ty: ty::ParamEnvAnd<'tcx, Ty<'tcx>>)
1553 -> Result<TyLayout<'tcx>, LayoutError<'tcx>> {
1555 tcx: self.global_tcx().at(self.span),
1556 param_env: param_env_and_ty.param_env
1558 cx.layout_of(param_env_and_ty.value)
// NOTE(review): sampled listing — many interior lines (match arms, closing
// braces, several field-type cases) are missing throughout this impl.
1562 impl<'a, 'tcx, C> TyLayoutMethods<'tcx, C> for Ty<'tcx>
1563 where C: LayoutOf<Ty = Ty<'tcx>> + HasTyCtxt<'tcx>,
1564 C::TyLayout: MaybeResult<TyLayout<'tcx>>
// Projects a multi-variant layout down to a single variant's layout.
1566 fn for_variant(this: TyLayout<'tcx>, cx: C, variant_index: usize) -> TyLayout<'tcx> {
1567 let details = match this.variants {
// Already the requested single variant: reuse as-is.
1568 Variants::Single { index } if index == variant_index => this.details,
1570 Variants::Single { index } => {
1571 // Deny calling for_variant more than once for non-Single enums.
1572 cx.layout_of(this.ty).map_same(|layout| {
1573 assert_eq!(layout.variants, Variants::Single { index });
// The requested variant is absent (uninhabited enum case): synthesize
// an uninhabited layout with the right field count.
1577 let fields = match this.ty.sty {
1578 ty::Adt(def, _) => def.variants[variant_index].fields.len(),
1582 tcx.intern_layout(LayoutDetails {
1583 variants: Variants::Single { index: variant_index },
1584 fields: FieldPlacement::Union(fields),
1585 abi: Abi::Uninhabited,
1586 align: tcx.data_layout.i8_align,
// Tagged/niche enums carry per-variant details; index into them.
1591 Variants::NicheFilling { ref variants, .. } |
1592 Variants::Tagged { ref variants, .. } => {
1593 &variants[variant_index]
1597 assert_eq!(details.variants, Variants::Single { index: variant_index });
// Computes the layout of field `i` of `this`, dispatching on the type.
1605 fn field(this: TyLayout<'tcx>, cx: C, i: usize) -> C::TyLayout {
1607 cx.layout_of(match this.ty.sty {
// Types with no fields: asking for one is a compiler bug.
1616 ty::GeneratorWitness(..) |
1618 ty::Dynamic(..) => {
1619 bug!("TyLayout::field_type({:?}): not applicable", this)
1622 // Potentially-fat pointers.
1623 ty::Ref(_, pointee, _) |
1624 ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
1625 assert!(i < this.fields.count());
1627 // Reuse the fat *T type as its own thin pointer data field.
1628 // This provides information about e.g. DST struct pointees
1629 // (which may have no non-DST form), and will work as long
1630 // as the `Abi` or `FieldPlacement` is checked by users.
1632 let nil = tcx.mk_unit();
1633 let ptr_ty = if this.ty.is_unsafe_ptr() {
1636 tcx.mk_mut_ref(tcx.types.re_static, nil)
// Compute the unit-pointer layout, then relabel it with the fat
// pointer's own type (see comment above).
1638 return cx.layout_of(ptr_ty).map_same(|mut ptr_layout| {
1639 ptr_layout.ty = this.ty;
// Field 1 of a fat pointer is the metadata: usize length for
// slices/str, a vtable pointer for trait objects.
1644 match tcx.struct_tail(pointee).sty {
1646 ty::Str => tcx.types.usize,
1647 ty::Dynamic(_, _) => {
1649 tcx.types.re_static,
1650 tcx.mk_array(tcx.types.usize, 3),
1652 /* FIXME use actual fn pointers
1653 Warning: naively computing the number of entries in the
1654 vtable by counting the methods on the trait + methods on
1655 all parent traits does not work, because some methods can
1656 be not object safe and thus excluded from the vtable.
1657 Increase this counter if you tried to implement this but
1658 failed to do it without duplicating a lot of code from
1659 other places in the compiler: 2
1661 tcx.mk_array(tcx.types.usize, 3),
1662 tcx.mk_array(Option<fn()>),
1666 _ => bug!("TyLayout::field_type({:?}): not applicable", this)
1670 // Arrays and slices.
1671 ty::Array(element, _) |
1672 ty::Slice(element) => element,
1673 ty::Str => tcx.types.u8,
1675 // Tuples, generators and closures.
1676 ty::Closure(def_id, ref substs) => {
1677 substs.upvar_tys(def_id, tcx).nth(i).unwrap()
1680 ty::Generator(def_id, ref substs, _) => {
1681 substs.field_tys(def_id, tcx).nth(i).unwrap()
1684 ty::Tuple(tys) => tys[i],
1686 // SIMD vector types.
1687 ty::Adt(def, ..) if def.repr.simd() => {
1688 this.ty.simd_type(tcx)
// Regular ADTs: either a real field of the single variant, or the
// synthesized discriminant field of a multi-variant enum.
1692 ty::Adt(def, substs) => {
1693 match this.variants {
1694 Variants::Single { index } => {
1695 def.variants[index].fields[i].ty(tcx, substs)
1698 // Discriminant field for enums (where applicable).
1699 Variants::Tagged { tag: ref discr, .. } |
1700 Variants::NicheFilling { niche: ref discr, .. } => {
// Build a standalone scalar layout for the discriminant and
// return it directly, bypassing the outer `cx.layout_of`.
1702 let layout = LayoutDetails::scalar(tcx, discr.clone());
1703 return MaybeResult::from_ok(TyLayout {
1704 details: tcx.intern_layout(layout),
1705 ty: discr.value.to_ty(tcx)
// Types that should have been normalized or reported away earlier.
1711 ty::Projection(_) | ty::UnnormalizedProjection(..) | ty::Bound(..) |
1712 ty::Opaque(..) | ty::Param(_) | ty::Infer(_) | ty::Error => {
1713 bug!("TyLayout::field_type: unexpected type `{}`", this.ty)
// Tries to reserve `count` values from this niche's invalid range.
// Returns the niche-start value and a widened Scalar, or None if the
// niche cannot fit `count` extra values. NOTE(review): the enclosing
// `impl` header and the `count: u128` parameter line are not visible in
// this sampled listing.
1726 fn reserve<'a, 'tcx>(
1728 cx: LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>>,
1730 ) -> Option<(u128, Scalar)> {
1731 if count > self.available {
1734 let Scalar { value, valid_range: ref v } = self.scalar;
// Wrapping arithmetic masked to the scalar's bit width: the reserved
// values start just past the current valid range's end.
1735 let bits = value.size(cx).bits();
1736 assert!(bits <= 128);
1737 let max_value = !0u128 >> (128 - bits);
1738 let start = v.end().wrapping_add(1) & max_value;
1739 let end = v.end().wrapping_add(count) & max_value;
// The returned scalar's valid range is extended to cover the newly
// reserved values (start unchanged, end advanced by `count`).
1740 Some((start, Scalar { value, valid_range: *v.start()..=end }))
1744 impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> {
1745 /// Find the offset of a niche leaf field, starting from
1746 /// the given type and recursing through aggregates.
1747 // FIXME(eddyb) traverse already optimized enums.
1748 fn find_niche(self, layout: TyLayout<'tcx>) -> Result<Option<Niche>, LayoutError<'tcx>> {
// Maps one Scalar at `offset` to a Niche, counting how many bit
// patterns fall outside its valid range.
1749 let scalar_niche = |scalar: &Scalar, offset| {
1750 let Scalar { value, valid_range: ref v } = *scalar;
1752 let bits = value.size(self).bits();
1753 assert!(bits <= 128);
1754 let max_value = !0u128 >> (128 - bits);
1756 // Find out how many values are outside the valid range.
// Two cases: a normal range (invalid values below start / above end)
// vs. a wrap-around range (invalid values strictly between end and start).
1757 let available = if v.start() <= v.end() {
1758 v.start() + (max_value - v.end())
1760 v.start() - v.end() - 1
1763 // Give up if there is no niche value available.
1768 Some(Niche { offset, scalar: scalar.clone(), available })
1771 // Locals variables which live across yields are stored
1772 // in the generator type as fields. These may be uninitialized
1773 // so we don't look for niches there.
1774 if let ty::Generator(..) = layout.ty.sty {
// Fast paths: scalar / scalar-pair / vector ABIs expose a niche directly.
1779 Abi::Scalar(ref scalar) => {
1780 return Ok(scalar_niche(scalar, Size::ZERO));
1782 Abi::ScalarPair(ref a, ref b) => {
1783 // HACK(nox): We iter on `b` and then `a` because `max_by_key`
1784 // returns the last maximum.
// `b`'s offset is `a`'s size rounded up to `b`'s ABI alignment.
1785 let niche = iter::once((b, a.value.size(self).abi_align(b.value.align(self))))
1786 .chain(iter::once((a, Size::ZERO)))
1787 .filter_map(|(scalar, offset)| scalar_niche(scalar, offset))
1788 .max_by_key(|niche| niche.available);
1791 Abi::Vector { ref element, .. } => {
1792 return Ok(scalar_niche(element, Size::ZERO));
1797 // Perhaps one of the fields is non-zero, let's recurse and find out.
1798 if let FieldPlacement::Union(_) = layout.fields {
1799 // Only Rust enums have safe-to-inspect fields
1800 // (a discriminant), other unions are unsafe.
1801 if let Variants::Single { .. } = layout.variants {
// Arrays: all elements share a layout, so inspecting element 0 suffices.
1805 if let FieldPlacement::Array { .. } = layout.fields {
1806 if layout.fields.count() > 0 {
1807 return self.find_niche(layout.field(self, 0)?);
// General aggregate: recurse into every field and keep the niche with
// the most available values, adjusting its offset to the parent's frame.
1812 let mut niche = None;
1813 let mut available = 0;
1814 for i in 0..layout.fields.count() {
1815 if let Some(mut c) = self.find_niche(layout.field(self, i)?)? {
1816 if c.available > available {
1817 available = c.available;
1818 c.offset += layout.fields.offset(i);
// Stable hashing for Variants: discriminant first, then each variant's
// payload fields in declaration order. NOTE(review): the `Tagged`/
// `NicheFilling` match-arm headers are missing from this sampled listing.
1827 impl<'a> HashStable<StableHashingContext<'a>> for Variants {
1828 fn hash_stable<W: StableHasherResult>(&self,
1829 hcx: &mut StableHashingContext<'a>,
1830 hasher: &mut StableHasher<W>) {
1831 use ty::layout::Variants::*;
1832 mem::discriminant(self).hash_stable(hcx, hasher);
1835 Single { index } => {
1836 index.hash_stable(hcx, hasher);
1842 tag.hash_stable(hcx, hasher);
1843 variants.hash_stable(hcx, hasher);
1852 dataful_variant.hash_stable(hcx, hasher);
1853 niche_variants.start().hash_stable(hcx, hasher);
1854 niche_variants.end().hash_stable(hcx, hasher);
1855 niche.hash_stable(hcx, hasher);
1856 niche_start.hash_stable(hcx, hasher);
1857 variants.hash_stable(hcx, hasher);
// Stable hashing for FieldPlacement: discriminant, then per-variant data
// (union field count, array count+stride, or arbitrary offsets/indices).
1863 impl<'a> HashStable<StableHashingContext<'a>> for FieldPlacement {
1864 fn hash_stable<W: StableHasherResult>(&self,
1865 hcx: &mut StableHashingContext<'a>,
1866 hasher: &mut StableHasher<W>) {
1867 use ty::layout::FieldPlacement::*;
1868 mem::discriminant(self).hash_stable(hcx, hasher);
1872 count.hash_stable(hcx, hasher);
1874 Array { count, stride } => {
1875 count.hash_stable(hcx, hasher);
1876 stride.hash_stable(hcx, hasher);
1878 Arbitrary { ref offsets, ref memory_index } => {
1879 offsets.hash_stable(hcx, hasher);
1880 memory_index.hash_stable(hcx, hasher);
// Stable hashing for Abi: discriminant, then the variant payload
// (scalar(s), vector element+count, or aggregate sized-ness).
1886 impl<'a> HashStable<StableHashingContext<'a>> for Abi {
1887 fn hash_stable<W: StableHasherResult>(&self,
1888 hcx: &mut StableHashingContext<'a>,
1889 hasher: &mut StableHasher<W>) {
1890 use ty::layout::Abi::*;
1891 mem::discriminant(self).hash_stable(hcx, hasher);
1895 Scalar(ref value) => {
1896 value.hash_stable(hcx, hasher);
1898 ScalarPair(ref a, ref b) => {
1899 a.hash_stable(hcx, hasher);
1900 b.hash_stable(hcx, hasher);
1902 Vector { ref element, count } => {
1903 element.hash_stable(hcx, hasher);
1904 count.hash_stable(hcx, hasher);
1906 Aggregate { sized } => {
1907 sized.hash_stable(hcx, hasher);
// Stable hashing for Scalar: the primitive value plus both valid-range
// bounds (destructured so a new field would cause a compile error here).
1913 impl<'a> HashStable<StableHashingContext<'a>> for Scalar {
1914 fn hash_stable<W: StableHasherResult>(&self,
1915 hcx: &mut StableHashingContext<'a>,
1916 hasher: &mut StableHasher<W>) {
1917 let Scalar { value, ref valid_range } = *self;
1918 value.hash_stable(hcx, hasher);
1919 valid_range.start().hash_stable(hcx, hasher);
1920 valid_range.end().hash_stable(hcx, hasher);
// Macro-generated stable-hash impls for LayoutDetails, Integer and
// Primitive. NOTE(review): the field/variant lists inside these macro
// invocations are mostly missing from this sampled listing.
1924 impl_stable_hash_for!(struct ::ty::layout::LayoutDetails {
1932 impl_stable_hash_for!(enum ::ty::layout::Integer {
1940 impl_stable_hash_for!(enum ::ty::layout::Primitive {
1941 Int(integer, signed),
// Stable hashing for Align: both the ABI and preferred alignments.
1946 impl<'gcx> HashStable<StableHashingContext<'gcx>> for Align {
1947 fn hash_stable<W: StableHasherResult>(&self,
1948 hcx: &mut StableHashingContext<'gcx>,
1949 hasher: &mut StableHasher<W>) {
1950 self.abi().hash_stable(hcx, hasher);
1951 self.pref().hash_stable(hcx, hasher);
// Stable hashing for Size: hash the byte count only.
1955 impl<'gcx> HashStable<StableHashingContext<'gcx>> for Size {
1956 fn hash_stable<W: StableHasherResult>(&self,
1957 hcx: &mut StableHashingContext<'gcx>,
1958 hasher: &mut StableHasher<W>) {
1959 self.bytes().hash_stable(hcx, hasher);
1963 impl<'a, 'gcx> HashStable<StableHashingContext<'a>> for LayoutError<'gcx>
1965 fn hash_stable<W: StableHasherResult>(&self,
1966 hcx: &mut StableHashingContext<'a>,
1967 hasher: &mut StableHasher<W>) {
1968 use ty::layout::LayoutError::*;
1969 mem::discriminant(self).hash_stable(hcx, hasher);
1973 SizeOverflow(t) => t.hash_stable(hcx, hasher)