// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use session::{self, DataTypeKind};
use ty::{self, Ty, TyCtxt, TypeFoldable, ReprOptions};

use syntax::ast::{self, IntTy, UintTy};
use syntax::attr;
use syntax_pos::DUMMY_SP;

use std::cmp;
use std::fmt;
use std::iter;
use std::mem;
use std::ops::Bound;

use ich::StableHashingContext;
use rustc_data_structures::stable_hasher::{HashStable, StableHasher,
                                           StableHasherResult};

pub use rustc_target::abi::*;
pub trait IntegerExt {
    fn to_ty<'a, 'tcx>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, signed: bool) -> Ty<'tcx>;
    fn from_attr<C: HasDataLayout>(cx: C, ity: attr::IntType) -> Integer;
    fn repr_discr<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
                            ty: Ty<'tcx>,
                            repr: &ReprOptions,
                            min: i128,
                            max: i128)
                            -> (Integer, bool);
}
impl IntegerExt for Integer {
    fn to_ty<'a, 'tcx>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, signed: bool) -> Ty<'tcx> {
        match (*self, signed) {
            (I8, false) => tcx.types.u8,
            (I16, false) => tcx.types.u16,
            (I32, false) => tcx.types.u32,
            (I64, false) => tcx.types.u64,
            (I128, false) => tcx.types.u128,
            (I8, true) => tcx.types.i8,
            (I16, true) => tcx.types.i16,
            (I32, true) => tcx.types.i32,
            (I64, true) => tcx.types.i64,
            (I128, true) => tcx.types.i128,
        }
    }
    /// Get the Integer type from an attr::IntType.
    fn from_attr<C: HasDataLayout>(cx: C, ity: attr::IntType) -> Integer {
        let dl = cx.data_layout();

        match ity {
            attr::SignedInt(IntTy::I8) | attr::UnsignedInt(UintTy::U8) => I8,
            attr::SignedInt(IntTy::I16) | attr::UnsignedInt(UintTy::U16) => I16,
            attr::SignedInt(IntTy::I32) | attr::UnsignedInt(UintTy::U32) => I32,
            attr::SignedInt(IntTy::I64) | attr::UnsignedInt(UintTy::U64) => I64,
            attr::SignedInt(IntTy::I128) | attr::UnsignedInt(UintTy::U128) => I128,
            attr::SignedInt(IntTy::Isize) | attr::UnsignedInt(UintTy::Usize) => {
                dl.ptr_sized_integer()
            }
        }
    }
    /// Find the appropriate Integer type and signedness for the given
    /// signed discriminant range and #[repr] attribute.
    /// N.B.: u128 values above i128::MAX will be treated as signed, but
    /// that shouldn't affect anything, other than maybe debuginfo.
    fn repr_discr<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
                            ty: Ty<'tcx>,
                            repr: &ReprOptions,
                            min: i128,
                            max: i128)
                            -> (Integer, bool) {
        // Theoretically, negative values could be larger in unsigned representation
        // than the unsigned representation of the signed minimum. However, if there
        // are any negative values, the only valid unsigned representation is u128
        // which can fit all i128 values, so the result remains unaffected.
        let unsigned_fit = Integer::fit_unsigned(cmp::max(min as u128, max as u128));
        let signed_fit = cmp::max(Integer::fit_signed(min), Integer::fit_signed(max));
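        // Illustrative example (editor's note): for min = -1 and max = 100,
        // signed_fit is I8, while `min as u128` wraps to u128::MAX, so
        // unsigned_fit is I128 -- exactly the case described above.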
        let mut min_from_extern = None;
        let min_default = I8;

        if let Some(ity) = repr.int {
            let discr = Integer::from_attr(tcx, ity);
            let fit = if ity.is_signed() { signed_fit } else { unsigned_fit };
            if discr < fit {
                bug!("Integer::repr_discr: `#[repr]` hint too small for \
                      discriminant range of enum `{}`", ty)
            }
            return (discr, ity.is_signed());
        }
        if repr.c() {
            match &tcx.sess.target.target.arch[..] {
                // WARNING: the ARM EABI has two variants; the one corresponding
                // to `at_least == I32` appears to be used on Linux and NetBSD,
                // but some systems may use the variant corresponding to no
                // lower bound. However, we don't run on those yet...?
                "arm" => min_from_extern = Some(I32),
                _ => min_from_extern = Some(I32),
            }
        }

        let at_least = min_from_extern.unwrap_or(min_default);
        // If there are no negative values, we can use the unsigned fit.
        if min >= 0 {
            (cmp::max(unsigned_fit, at_least), false)
        } else {
            (cmp::max(signed_fit, at_least), true)
        }
    }
}
pub trait PrimitiveExt {
    fn to_ty<'a, 'tcx>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>) -> Ty<'tcx>;
}

impl PrimitiveExt for Primitive {
    fn to_ty<'a, 'tcx>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>) -> Ty<'tcx> {
        match *self {
            Int(i, signed) => i.to_ty(tcx, signed),
            Float(FloatTy::F32) => tcx.types.f32,
            Float(FloatTy::F64) => tcx.types.f64,
            Pointer => tcx.mk_mut_ptr(tcx.mk_unit()),
        }
    }
}
/// The first half of a fat pointer.
///
/// - For a trait object, this is the address of the box.
/// - For a slice, this is the base address.
pub const FAT_PTR_ADDR: usize = 0;

/// The second half of a fat pointer.
///
/// - For a trait object, this is the address of the vtable.
/// - For a slice, this is the length.
pub const FAT_PTR_EXTRA: usize = 1;
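// Illustrative note: a `&[u8]` is represented as (data pointer, length) and
// a `&dyn Trait` as (data pointer, vtable pointer), so FAT_PTR_ADDR indexes
// the data pointer and FAT_PTR_EXTRA the length or vtable half.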
#[derive(Copy, Clone, Debug, RustcEncodable, RustcDecodable)]
pub enum LayoutError<'tcx> {
    Unknown(Ty<'tcx>),
    SizeOverflow(Ty<'tcx>)
}

impl<'tcx> fmt::Display for LayoutError<'tcx> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match *self {
            LayoutError::Unknown(ty) => {
                write!(f, "the type `{:?}` has an unknown layout", ty)
            }
            LayoutError::SizeOverflow(ty) => {
                write!(f, "the type `{:?}` is too big for the current architecture", ty)
            }
        }
    }
}
fn layout_raw<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
                        query: ty::ParamEnvAnd<'tcx, Ty<'tcx>>)
                        -> Result<&'tcx LayoutDetails, LayoutError<'tcx>>
{
    ty::tls::with_related_context(tcx, move |icx| {
        let rec_limit = *tcx.sess.recursion_limit.get();
        let (param_env, ty) = query.into_parts();

        if icx.layout_depth > rec_limit {
            tcx.sess.fatal(
                &format!("overflow representing the type `{}`", ty));
        }

        // Update the ImplicitCtxt to increase the layout_depth
        let icx = ty::tls::ImplicitCtxt {
            layout_depth: icx.layout_depth + 1,
            ..icx.clone()
        };

        ty::tls::enter_context(&icx, |_| {
            let cx = LayoutCx { tcx, param_env };
            cx.layout_raw_uncached(ty)
        })
    })
}
pub fn provide(providers: &mut ty::query::Providers<'_>) {
    *providers = ty::query::Providers {
        layout_raw,
        ..*providers
    };
}
#[derive(Copy, Clone)]
pub struct LayoutCx<'tcx, C> {
    pub tcx: C,
    pub param_env: ty::ParamEnv<'tcx>
}
impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> {
    fn layout_raw_uncached(self, ty: Ty<'tcx>)
                           -> Result<&'tcx LayoutDetails, LayoutError<'tcx>> {
        let tcx = self.tcx;
        let param_env = self.param_env;
        let dl = self.data_layout();
        let scalar_unit = |value: Primitive| {
            let bits = value.size(dl).bits();
            assert!(bits <= 128);
            Scalar {
                value,
                valid_range: 0..=(!0 >> (128 - bits))
            }
        };
        let scalar = |value: Primitive| {
            tcx.intern_layout(LayoutDetails::scalar(self, scalar_unit(value)))
        };
        let scalar_pair = |a: Scalar, b: Scalar| {
            let align = a.value.align(dl).max(b.value.align(dl)).max(dl.aggregate_align);
            let b_offset = a.value.size(dl).abi_align(b.value.align(dl));
            let size = (b_offset + b.value.size(dl)).abi_align(align);
            LayoutDetails {
                variants: Variants::Single { index: 0 },
                fields: FieldPlacement::Arbitrary {
                    offsets: vec![Size::ZERO, b_offset],
                    memory_index: vec![0, 1]
                },
                abi: Abi::ScalarPair(a, b),
                align,
                size
            }
        };
        #[derive(Copy, Clone, Debug)]
        enum StructKind {
            /// A tuple, closure, or univariant which cannot be coerced to unsized.
            AlwaysSized,
            /// A univariant, the last field of which may be coerced to unsized.
            MaybeUnsized,
            /// A univariant, but with a prefix of an arbitrary size & alignment (e.g. enum tag).
            Prefixed(Size, Align),
        }
        let univariant_uninterned = |fields: &[TyLayout<'_>], repr: &ReprOptions, kind| {
            let packed = repr.packed();
            if packed && repr.align > 0 {
                bug!("struct cannot be packed and aligned");
            }

            let pack = {
                let pack = repr.pack as u64;
                Align::from_bytes(pack, pack).unwrap()
            };

            let mut align = if packed {
                dl.i8_align
            } else {
                dl.aggregate_align
            };

            let mut sized = true;
            let mut offsets = vec![Size::ZERO; fields.len()];
            let mut inverse_memory_index: Vec<u32> = (0..fields.len() as u32).collect();

            let mut optimize = !repr.inhibit_struct_field_reordering_opt();
            if let StructKind::Prefixed(_, align) = kind {
                optimize &= align.abi() == 1;
            }
            if optimize {
                let end = if let StructKind::MaybeUnsized = kind {
                    fields.len() - 1
                } else {
                    fields.len()
                };
                let optimizing = &mut inverse_memory_index[..end];
                let field_align = |f: &TyLayout<'_>| {
                    if packed { f.align.min(pack).abi() } else { f.align.abi() }
                };
                match kind {
                    StructKind::AlwaysSized |
                    StructKind::MaybeUnsized => {
                        optimizing.sort_by_key(|&x| {
                            // Place ZSTs first to avoid "interesting offsets",
                            // especially with only one or two non-ZST fields.
                            let f = &fields[x as usize];
                            (!f.is_zst(), cmp::Reverse(field_align(f)))
                        });
                    }
                    StructKind::Prefixed(..) => {
                        optimizing.sort_by_key(|&x| field_align(&fields[x as usize]));
                    }
                }
            }
            // inverse_memory_index holds field indices by increasing memory offset.
            // That is, if field 5 has offset 0, the first element of inverse_memory_index is 5.
            // We now write field offsets to the corresponding offset slot;
            // field 5 with offset 0 puts 0 in offsets[5].
            // At the bottom of this function, we use inverse_memory_index to produce memory_index.
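            // Worked example (editor's note): with fields [A, B, C] laid out
            // in memory as [C, A, B], inverse_memory_index is [2, 0, 1], i.e.
            // field 2 occupies the lowest offset.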
            let mut offset = Size::ZERO;

            if let StructKind::Prefixed(prefix_size, prefix_align) = kind {
                if packed {
                    let prefix_align = prefix_align.min(pack);
                    align = align.max(prefix_align);
                } else {
                    align = align.max(prefix_align);
                }
                offset = prefix_size.abi_align(prefix_align);
            }

            for &i in &inverse_memory_index {
                let field = fields[i as usize];
                if !sized {
                    bug!("univariant: field #{} of `{}` comes after unsized field",
                         offsets.len(), ty);
                }

                if field.is_unsized() {
                    sized = false;
                }

                // Invariant: offset < dl.obj_size_bound() <= 1<<61
                if packed {
                    let field_pack = field.align.min(pack);
                    offset = offset.abi_align(field_pack);
                    align = align.max(field_pack);
                } else {
                    offset = offset.abi_align(field.align);
                    align = align.max(field.align);
                }

                debug!("univariant offset: {:?} field: {:#?}", offset, field);
                offsets[i as usize] = offset;

                offset = offset.checked_add(field.size, dl)
                    .ok_or(LayoutError::SizeOverflow(ty))?;
            }
            if repr.align > 0 {
                let repr_align = repr.align as u64;
                align = align.max(Align::from_bytes(repr_align, repr_align).unwrap());
                debug!("univariant repr_align: {:?}", repr_align);
            }

            debug!("univariant min_size: {:?}", offset);
            let min_size = offset;
            // As stated above, inverse_memory_index holds field indices by increasing offset.
            // This makes it an already-sorted view of the offsets vec.
            // To invert it, consider:
            // If field 5 has offset 0, offsets[0] is 5, and memory_index[5] should be 0.
            // Field 5 would be the first element, so memory_index is i:
            // Note: if we didn't optimize, it's already right.
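            // Continuing the worked example above: inverse_memory_index
            // [2, 0, 1] inverts to memory_index [1, 2, 0], i.e. field 0 is
            // second in memory, field 1 third, and field 2 first.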
            let mut memory_index;
            if optimize {
                memory_index = vec![0; inverse_memory_index.len()];

                for i in 0..inverse_memory_index.len() {
                    memory_index[inverse_memory_index[i] as usize] = i as u32;
                }
            } else {
                memory_index = inverse_memory_index;
            }

            let size = min_size.abi_align(align);
            let mut abi = Abi::Aggregate { sized };
            // Unpack newtype ABIs and find scalar pairs.
            if sized && size.bytes() > 0 {
                // All other fields must be ZSTs, and we need them to all start at 0.
                let mut zst_offsets =
                    offsets.iter().enumerate().filter(|&(i, _)| fields[i].is_zst());
                if zst_offsets.all(|(_, o)| o.bytes() == 0) {
                    let mut non_zst_fields =
                        fields.iter().enumerate().filter(|&(_, f)| !f.is_zst());

                    match (non_zst_fields.next(), non_zst_fields.next(), non_zst_fields.next()) {
                        // We have exactly one non-ZST field.
                        (Some((i, field)), None, None) => {
                            // Field fills the struct and it has a scalar or scalar pair ABI.
                            if offsets[i].bytes() == 0 &&
                               align.abi() == field.align.abi() &&
                               size == field.size {
                                match field.abi {
                                    // For plain scalars, or vectors of them, we can't unpack
                                    // newtypes for `#[repr(C)]`, as that affects C ABIs.
                                    Abi::Scalar(_) | Abi::Vector { .. } if optimize => {
                                        abi = field.abi.clone();
                                    }
                                    // But scalar pairs are Rust-specific and get
                                    // treated as aggregates by C ABIs anyway.
                                    Abi::ScalarPair(..) => {
                                        abi = field.abi.clone();
                                    }
                                    _ => {}
                                }
                            }
                        }
                        // Two non-ZST fields, and they're both scalars.
                        (Some((i, &TyLayout {
                            details: &LayoutDetails { abi: Abi::Scalar(ref a), .. }, ..
                        })), Some((j, &TyLayout {
                            details: &LayoutDetails { abi: Abi::Scalar(ref b), .. }, ..
                        })), None) => {
                            // Order by the memory placement, not source order.
                            let ((i, a), (j, b)) = if offsets[i] < offsets[j] {
                                ((i, a), (j, b))
                            } else {
                                ((j, b), (i, a))
                            };
                            let pair = scalar_pair(a.clone(), b.clone());
                            let pair_offsets = match pair.fields {
                                FieldPlacement::Arbitrary {
                                    ref offsets,
                                    ref memory_index
                                } => {
                                    assert_eq!(memory_index, &[0, 1]);
                                    offsets
                                }
                                _ => bug!()
                            };
                            if offsets[i] == pair_offsets[0] &&
                               offsets[j] == pair_offsets[1] &&
                               align == pair.align &&
                               size == pair.size {
                                // We can use `ScalarPair` only when it matches our
                                // already computed layout (including `#[repr(C)]`).
                                abi = pair.abi.clone();
                            }
                        }
                        _ => {}
                    }
                }
            }
            if sized && fields.iter().any(|f| f.abi.is_uninhabited()) {
                abi = Abi::Uninhabited;
            }

            Ok(LayoutDetails {
                variants: Variants::Single { index: 0 },
                fields: FieldPlacement::Arbitrary {
                    offsets,
                    memory_index
                },
                abi,
                align,
                size
            })
        };
        let univariant = |fields: &[TyLayout<'_>], repr: &ReprOptions, kind| {
            Ok(tcx.intern_layout(univariant_uninterned(fields, repr, kind)?))
        };
        debug_assert!(!ty.has_infer_types());

        Ok(match ty.sty {
            // Basic scalars.
            ty::Bool => {
                tcx.intern_layout(LayoutDetails::scalar(self, Scalar {
                    value: Int(I8, false),
                    valid_range: 0..=1
                }))
            }
            ty::Char => {
                tcx.intern_layout(LayoutDetails::scalar(self, Scalar {
                    value: Int(I32, false),
                    valid_range: 0..=0x10FFFF
                }))
            }
            ty::Int(ity) => {
                scalar(Int(Integer::from_attr(dl, attr::SignedInt(ity)), true))
            }
            ty::Uint(ity) => {
                scalar(Int(Integer::from_attr(dl, attr::UnsignedInt(ity)), false))
            }
            ty::Float(fty) => scalar(Float(fty)),
            ty::FnPtr(_) => {
                let mut ptr = scalar_unit(Pointer);
                ptr.valid_range = 1..=*ptr.valid_range.end();
                tcx.intern_layout(LayoutDetails::scalar(self, ptr))
            }

            // The never type.
            ty::Never => {
                tcx.intern_layout(LayoutDetails {
                    variants: Variants::Single { index: 0 },
                    fields: FieldPlacement::Union(0),
                    abi: Abi::Uninhabited,
                    align: dl.i8_align,
                    size: Size::ZERO
                })
            }
            // Potentially-fat pointers.
            ty::Ref(_, pointee, _) |
            ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
                let mut data_ptr = scalar_unit(Pointer);
                if !ty.is_unsafe_ptr() {
                    data_ptr.valid_range = 1..=*data_ptr.valid_range.end();
                }

                let pointee = tcx.normalize_erasing_regions(param_env, pointee);
                if pointee.is_sized(tcx.at(DUMMY_SP), param_env) {
                    return Ok(tcx.intern_layout(LayoutDetails::scalar(self, data_ptr)));
                }

                let unsized_part = tcx.struct_tail(pointee);
                let metadata = match unsized_part.sty {
                    ty::Foreign(..) => {
                        return Ok(tcx.intern_layout(LayoutDetails::scalar(self, data_ptr)));
                    }
                    ty::Slice(_) | ty::Str => {
                        scalar_unit(Int(dl.ptr_sized_integer(), false))
                    }
                    ty::Dynamic(..) => {
                        let mut vtable = scalar_unit(Pointer);
                        vtable.valid_range = 1..=*vtable.valid_range.end();
                        vtable
                    }
                    _ => return Err(LayoutError::Unknown(unsized_part))
                };

                // Effectively a (ptr, meta) tuple.
                tcx.intern_layout(scalar_pair(data_ptr, metadata))
            }
            // Arrays and slices.
            ty::Array(element, mut count) => {
                if count.has_projections() {
                    count = tcx.normalize_erasing_regions(param_env, count);
                    if count.has_projections() {
                        return Err(LayoutError::Unknown(ty));
                    }
                }

                let element = self.layout_of(element)?;
                let count = count.unwrap_usize(tcx);
                let size = element.size.checked_mul(count, dl)
                    .ok_or(LayoutError::SizeOverflow(ty))?;

                tcx.intern_layout(LayoutDetails {
                    variants: Variants::Single { index: 0 },
                    fields: FieldPlacement::Array {
                        stride: element.size,
                        count
                    },
                    abi: Abi::Aggregate { sized: true },
                    align: element.align,
                    size
                })
            }
            ty::Slice(element) => {
                let element = self.layout_of(element)?;
                tcx.intern_layout(LayoutDetails {
                    variants: Variants::Single { index: 0 },
                    fields: FieldPlacement::Array {
                        stride: element.size,
                        count: 0
                    },
                    abi: Abi::Aggregate { sized: false },
                    align: element.align,
                    size: Size::ZERO
                })
            }
            ty::Str => {
                tcx.intern_layout(LayoutDetails {
                    variants: Variants::Single { index: 0 },
                    fields: FieldPlacement::Array {
                        stride: Size::from_bytes(1),
                        count: 0
                    },
                    abi: Abi::Aggregate { sized: false },
                    align: dl.i8_align,
                    size: Size::ZERO
                })
            }

            // Odd unit types.
            ty::FnDef(..) => {
                univariant(&[], &ReprOptions::default(), StructKind::AlwaysSized)?
            }
            ty::Dynamic(..) | ty::Foreign(..) => {
                let mut unit = univariant_uninterned(&[], &ReprOptions::default(),
                    StructKind::AlwaysSized)?;
                match unit.abi {
                    Abi::Aggregate { ref mut sized } => *sized = false,
                    _ => bug!()
                }
                tcx.intern_layout(unit)
            }
            // Tuples, generators and closures.
            ty::Generator(def_id, ref substs, _) => {
                let tys = substs.field_tys(def_id, tcx);
                univariant(&tys.map(|ty| self.layout_of(ty)).collect::<Result<Vec<_>, _>>()?,
                    &ReprOptions::default(),
                    StructKind::AlwaysSized)?
            }

            ty::Closure(def_id, ref substs) => {
                let tys = substs.upvar_tys(def_id, tcx);
                univariant(&tys.map(|ty| self.layout_of(ty)).collect::<Result<Vec<_>, _>>()?,
                    &ReprOptions::default(),
                    StructKind::AlwaysSized)?
            }

            ty::Tuple(tys) => {
                let kind = if tys.len() == 0 {
                    StructKind::AlwaysSized
                } else {
                    StructKind::MaybeUnsized
                };

                univariant(&tys.iter().map(|ty| self.layout_of(ty)).collect::<Result<Vec<_>, _>>()?,
                    &ReprOptions::default(), kind)?
            }
            // SIMD vector types.
            ty::Adt(def, ..) if def.repr.simd() => {
                let element = self.layout_of(ty.simd_type(tcx))?;
                let count = ty.simd_size(tcx) as u64;
                assert!(count > 0);
                let scalar = match element.abi {
                    Abi::Scalar(ref scalar) => scalar.clone(),
                    _ => {
                        tcx.sess.fatal(&format!("monomorphising SIMD type `{}` with \
                                                 a non-machine element type `{}`",
                                                ty, element.ty));
                    }
                };
                let size = element.size.checked_mul(count, dl)
                    .ok_or(LayoutError::SizeOverflow(ty))?;
                let align = dl.vector_align(size);
                let size = size.abi_align(align);

                tcx.intern_layout(LayoutDetails {
                    variants: Variants::Single { index: 0 },
                    fields: FieldPlacement::Array {
                        stride: element.size,
                        count
                    },
                    abi: Abi::Vector {
                        element: scalar,
                        count
                    },
                    align,
                    size
                })
            }
            // ADTs.
            ty::Adt(def, substs) => {
                // Cache the field layouts.
                let variants = def.variants.iter().map(|v| {
                    v.fields.iter().map(|field| {
                        self.layout_of(field.ty(tcx, substs))
                    }).collect::<Result<Vec<_>, _>>()
                }).collect::<Result<Vec<_>, _>>()?;

                if def.is_union() {
                    let packed = def.repr.packed();
                    if packed && def.repr.align > 0 {
                        bug!("union cannot be packed and aligned");
                    }

                    let pack = {
                        let pack = def.repr.pack as u64;
                        Align::from_bytes(pack, pack).unwrap()
                    };

                    let mut align = if packed {
                        dl.i8_align
                    } else {
                        dl.aggregate_align
                    };

                    if def.repr.align > 0 {
                        let repr_align = def.repr.align as u64;
                        align = align.max(
                            Align::from_bytes(repr_align, repr_align).unwrap());
                    }

                    let mut size = Size::ZERO;
                    for field in &variants[0] {
                        assert!(!field.is_unsized());

                        if packed {
                            let field_pack = field.align.min(pack);
                            align = align.max(field_pack);
                        } else {
                            align = align.max(field.align);
                        }
                        size = cmp::max(size, field.size);
                    }

                    return Ok(tcx.intern_layout(LayoutDetails {
                        variants: Variants::Single { index: 0 },
                        fields: FieldPlacement::Union(variants[0].len()),
                        abi: Abi::Aggregate { sized: true },
                        align,
                        size: size.abi_align(align)
                    }));
                }
                // A variant is absent if it's uninhabited and only has ZST fields.
                // Present uninhabited variants only require space for their fields,
                // but *not* an encoding of the discriminant (e.g. a tag value).
                // See issue #49298 for more details on the need to leave space
                // for non-ZST uninhabited data (mostly partial initialization).
                let absent = |fields: &[TyLayout<'_>]| {
                    let uninhabited = fields.iter().any(|f| f.abi.is_uninhabited());
                    let is_zst = fields.iter().all(|f| f.is_zst());
                    uninhabited && is_zst
                };
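                // Illustrative example (editor's note): in
                // `enum E { A(!), B(!, u32), C(u32) }`, variant A is absent
                // (uninhabited with only ZST fields), while B is uninhabited
                // yet still present, since its u32 field needs space.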
                let (present_first, present_second) = {
                    let mut present_variants = (0..variants.len()).filter(|&v| {
                        !absent(&variants[v])
                    });
                    (present_variants.next(), present_variants.next())
                };
                if present_first.is_none() {
                    // Uninhabited because it has no variants, or only absent ones.
                    return tcx.layout_raw(param_env.and(tcx.types.never));
                }
                let is_struct = !def.is_enum() ||
                    // Only one variant is present.
                    (present_second.is_none() &&
                     // Representation optimizations are allowed.
                     !def.repr.inhibit_enum_layout_opt());
                if is_struct {
                    // Struct, or univariant enum equivalent to a struct.
                    // (Typechecking will reject discriminant-sizing attrs.)

                    let v = present_first.unwrap();
                    let kind = if def.is_enum() || variants[v].len() == 0 {
                        StructKind::AlwaysSized
                    } else {
                        let param_env = tcx.param_env(def.did);
                        let last_field = def.variants[v].fields.last().unwrap();
                        let always_sized = tcx.type_of(last_field.did)
                            .is_sized(tcx.at(DUMMY_SP), param_env);
                        if !always_sized { StructKind::MaybeUnsized }
                        else { StructKind::AlwaysSized }
                    };
                    let mut st = univariant_uninterned(&variants[v], &def.repr, kind)?;
                    st.variants = Variants::Single { index: v };
                    let (start, end) = self.tcx.layout_scalar_valid_range(def.did);
                    match st.abi {
                        Abi::Scalar(ref mut scalar) |
                        Abi::ScalarPair(ref mut scalar, _) => {
                            // the asserts ensure that we are not using the
                            // `#[rustc_layout_scalar_valid_range(n)]`
                            // attribute to widen the range of anything as that would probably
                            // result in UB somewhere
                            if let Bound::Included(start) = start {
                                assert!(*scalar.valid_range.start() <= start);
                                scalar.valid_range = start..=*scalar.valid_range.end();
                            }
                            if let Bound::Included(end) = end {
                                assert!(*scalar.valid_range.end() >= end);
                                scalar.valid_range = *scalar.valid_range.start()..=end;
                            }
                        }
                        _ => assert!(
                            start == Bound::Unbounded && end == Bound::Unbounded,
                            "nonscalar layout for layout_scalar_valid_range type {:?}: {:#?}",
                            def,
                            st,
                        ),
                    }
                    return Ok(tcx.intern_layout(st));
                }
                // The current code for niche-filling relies on variant indices
                // instead of actual discriminants, so dataful enums with
                // explicit discriminants (RFC #2363) would misbehave.
                let no_explicit_discriminants = def.variants.iter().enumerate()
                    .all(|(i, v)| v.discr == ty::VariantDiscr::Relative(i));

                // Niche-filling enum optimization.
                if !def.repr.inhibit_enum_layout_opt() && no_explicit_discriminants {
                    let mut dataful_variant = None;
                    let mut niche_variants = usize::max_value()..=0;

                    // Find one non-ZST variant.
                    'variants: for (v, fields) in variants.iter().enumerate() {
                        if absent(fields) {
                            continue 'variants;
                        }
                        for f in fields {
                            if !f.is_zst() {
                                if dataful_variant.is_none() {
                                    dataful_variant = Some(v);
                                    continue 'variants;
                                } else {
                                    dataful_variant = None;
                                    break 'variants;
                                }
                            }
                        }
                        niche_variants = *niche_variants.start().min(&v)..=v;
                    }

                    if niche_variants.start() > niche_variants.end() {
                        dataful_variant = None;
                    }
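                    // Illustrative example (editor's note): for `Option<&T>`,
                    // `Some` is the dataful variant and `None` the single niche
                    // variant, so the null pointer value encodes `None` with no
                    // separate tag.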
                    if let Some(i) = dataful_variant {
                        let count = (niche_variants.end() - niche_variants.start() + 1) as u128;
                        for (field_index, &field) in variants[i].iter().enumerate() {
                            let niche = match self.find_niche(field)? {
                                Some(niche) => niche,
                                None => continue
                            };
                            let (niche_start, niche_scalar) = match niche.reserve(self, count) {
                                Some(pair) => pair,
                                None => continue
                            };

                            let mut align = dl.aggregate_align;
                            let st = variants.iter().enumerate().map(|(j, v)| {
                                let mut st = univariant_uninterned(v,
                                    &def.repr, StructKind::AlwaysSized)?;
                                st.variants = Variants::Single { index: j };

                                align = align.max(st.align);

                                Ok(st)
                            }).collect::<Result<Vec<_>, _>>()?;

                            let offset = st[i].fields.offset(field_index) + niche.offset;
                            let size = st[i].size;
                            let mut abi = match st[i].abi {
                                Abi::Scalar(_) => Abi::Scalar(niche_scalar.clone()),
                                Abi::ScalarPair(ref first, ref second) => {
                                    // We need to use scalar_unit to reset the
                                    // valid range to the maximal one for that
                                    // primitive, because only the niche is
                                    // guaranteed to be initialised, not the
                                    // other primitive.
                                    if offset.bytes() == 0 {
                                        Abi::ScalarPair(
                                            niche_scalar.clone(),
                                            scalar_unit(second.value),
                                        )
                                    } else {
                                        Abi::ScalarPair(
                                            scalar_unit(first.value),
                                            niche_scalar.clone(),
                                        )
                                    }
                                }
                                _ => Abi::Aggregate { sized: true },
                            };
                            if st.iter().all(|v| v.abi.is_uninhabited()) {
                                abi = Abi::Uninhabited;
                            }

                            return Ok(tcx.intern_layout(LayoutDetails {
                                variants: Variants::NicheFilling {
                                    dataful_variant: i,
                                    niche_variants: niche_variants.clone(),
                                    niche: niche_scalar,
                                    niche_start,
                                    variants: st,
                                },
                                fields: FieldPlacement::Arbitrary {
                                    offsets: vec![offset],
                                    memory_index: vec![0]
                                },
                                abi,
                                size,
                                align,
                            }));
                        }
                    }
                }
                let (mut min, mut max) = (i128::max_value(), i128::min_value());
                let discr_type = def.repr.discr_type();
                let bits = Integer::from_attr(tcx, discr_type).size().bits();
                for (i, discr) in def.discriminants(tcx).enumerate() {
                    if variants[i].iter().any(|f| f.abi.is_uninhabited()) {
                        continue;
                    }
                    let mut x = discr.val as i128;
                    if discr_type.is_signed() {
                        // sign extend the raw representation to be an i128
                        x = (x << (128 - bits)) >> (128 - bits);
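                        // e.g. with an I8 discriminant, bits = 8, so a raw
                        // value of 0xFF sign-extends to -1 as an i128.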
                    }
                    if x < min { min = x; }
                    if x > max { max = x; }
                }
                // We might have no inhabited variants, so pretend there's at least one.
                if (min, max) == (i128::max_value(), i128::min_value()) {
                    min = 0;
                    max = 0;
                }
                assert!(min <= max, "discriminant range is {}...{}", min, max);
                let (min_ity, signed) = Integer::repr_discr(tcx, ty, &def.repr, min, max);
                let mut align = dl.aggregate_align;
                let mut size = Size::ZERO;

                // We're interested in the smallest alignment, so start large.
                let mut start_align = Align::from_bytes(256, 256).unwrap();
                assert_eq!(Integer::for_abi_align(dl, start_align), None);

                // repr(C) on an enum tells us to make a (tag, union) layout,
                // so we need to grow the prefix alignment to be at least
                // the alignment of the union. (This value is used both for
                // determining the alignment of the overall enum, and for
                // determining the alignment of the payload after the tag.)
                let mut prefix_align = min_ity.align(dl);
                if def.repr.c() {
                    for fields in &variants {
                        for field in fields {
                            prefix_align = prefix_align.max(field.align);
                        }
                    }
                }
                // Create the set of structs that represent each variant.
                let mut layout_variants = variants.iter().enumerate().map(|(i, field_layouts)| {
                    let mut st = univariant_uninterned(&field_layouts,
                        &def.repr, StructKind::Prefixed(min_ity.size(), prefix_align))?;
                    st.variants = Variants::Single { index: i };
                    // Find the first field we can't move later
                    // to make room for a larger discriminant.
                    for field in st.fields.index_by_increasing_offset().map(|j| field_layouts[j]) {
                        if !field.is_zst() || field.align.abi() != 1 {
                            start_align = start_align.min(field.align);
                            break;
                        }
                    }
                    size = cmp::max(size, st.size);
                    align = align.max(st.align);
                    Ok(st)
                }).collect::<Result<Vec<_>, _>>()?;

                // Align the maximum variant size to the largest alignment.
                size = size.abi_align(align);
                if size.bytes() >= dl.obj_size_bound() {
                    return Err(LayoutError::SizeOverflow(ty));
                }

                let typeck_ity = Integer::from_attr(dl, def.repr.discr_type());
                if typeck_ity < min_ity {
                    // It is a bug if Layout decided on a greater discriminant size than
                    // typeck did at this point (based on the values the discriminant can
                    // take on), because the discriminant will be loaded and then stored
                    // into a variable of the type calculated by typeck. Consider such a
                    // case (a bug): typeck decided on a byte-sized discriminant, but
                    // layout thinks we need 16 bits to store all the discriminant values.
                    // Then, in codegen, storing this 16-bit discriminant into an 8-bit
                    // temporary would discard some of the space necessary to represent it
                    // (or layout is wrong in thinking it needs 16 bits).
                    bug!("layout decided on a larger discriminant type ({:?}) than typeck ({:?})",
                         min_ity, typeck_ity);
                    // However, it is fine to make the discriminant type however large
                    // (as an optimisation) after this point; we'll just truncate the
                    // value we load in codegen.
                }
                // Check to see if we should use a different type for the
                // discriminant. We can safely use a type with the same size
                // as the alignment of the first field of each variant.
                // We increase the size of the discriminant to avoid LLVM copying
                // padding when it doesn't need to. This normally causes unaligned
                // load/stores and excessive memcpy/memset operations. By using a
                // bigger integer size, LLVM can be sure about its contents and
                // won't be so conservative.

                // Use the initial field alignment
                let mut ity = if def.repr.c() || def.repr.int.is_some() {
                    min_ity
                } else {
                    Integer::for_abi_align(dl, start_align).unwrap_or(min_ity)
                };

                // If the alignment is not larger than the chosen discriminant size,
                // don't use the alignment as the final size.
                if ity <= min_ity {
                    ity = min_ity;
                } else {
                    // Patch up the variants' first few fields.
                    let old_ity_size = min_ity.size();
                    let new_ity_size = ity.size();
                    for variant in &mut layout_variants {
                        match variant.fields {
                            FieldPlacement::Arbitrary { ref mut offsets, .. } => {
                                for i in offsets {
                                    if *i <= old_ity_size {
                                        assert_eq!(*i, old_ity_size);
                                        *i = new_ity_size;
                                    }
                                }
                                // We might be making the struct larger.
                                if variant.size <= old_ity_size {
                                    variant.size = new_ity_size;
                                }
                            }
                            _ => bug!()
                        }
                    }
                }
                let tag_mask = !0u128 >> (128 - ity.size().bits());
                let tag = Scalar {
                    value: Int(ity, signed),
                    valid_range: (min as u128 & tag_mask)..=(max as u128 & tag_mask),
                };
                let mut abi = Abi::Aggregate { sized: true };
                if tag.value.size(dl) == size {
                    abi = Abi::Scalar(tag.clone());
                } else {
                    // Try to use a ScalarPair for all tagged enums.
                    let mut common_prim = None;
                    for (field_layouts, layout_variant) in variants.iter().zip(&layout_variants) {
                        let offsets = match layout_variant.fields {
                            FieldPlacement::Arbitrary { ref offsets, .. } => offsets,
                            _ => bug!()
                        };
                        let mut fields = field_layouts
                            .iter()
                            .zip(offsets)
                            .filter(|p| !p.0.is_zst());
                        let (field, offset) = match (fields.next(), fields.next()) {
                            (None, None) => continue,
                            (Some(pair), None) => pair,
                            _ => {
                                common_prim = None;
                                break;
                            }
                        };
                        let prim = match field.details.abi {
                            Abi::Scalar(ref scalar) => scalar.value,
                            _ => {
                                common_prim = None;
                                break;
                            }
                        };
                        if let Some(pair) = common_prim {
                            // This is pretty conservative. We could go fancier
                            // by conflating things like i32 and u32, or even
                            // realising that (u8, u8) could just cohabit with
                            // u16 or even u32.
                            if pair != (prim, offset) {
                                common_prim = None;
                                break;
                            }
                        } else {
                            common_prim = Some((prim, offset));
                        }
                    }
                    if let Some((prim, offset)) = common_prim {
                        let pair = scalar_pair(tag.clone(), scalar_unit(prim));
                        let pair_offsets = match pair.fields {
                            FieldPlacement::Arbitrary {
                                ref offsets,
                                ref memory_index
                            } => {
                                assert_eq!(memory_index, &[0, 1]);
                                offsets
                            }
                            _ => bug!()
                        };
                        if pair_offsets[0] == Size::ZERO &&
                           pair_offsets[1] == *offset &&
                           align == pair.align &&
                           size == pair.size {
                            // We can use `ScalarPair` only when it matches our
                            // already computed layout (including `#[repr(C)]`).
                            abi = pair.abi.clone();
                        }
                    }
                }

                if layout_variants.iter().all(|v| v.abi.is_uninhabited()) {
                    abi = Abi::Uninhabited;
                }
                tcx.intern_layout(LayoutDetails {
                    variants: Variants::Tagged {
                        tag,
                        variants: layout_variants,
                    },
                    fields: FieldPlacement::Arbitrary {
                        offsets: vec![Size::ZERO],
                        memory_index: vec![0]
                    },
                    abi,
                    align,
                    size
                })
            }
            // Types with no meaningful known layout.
            ty::Projection(_) | ty::Opaque(..) => {
                let normalized = tcx.normalize_erasing_regions(param_env, ty);
                if ty == normalized {
                    return Err(LayoutError::Unknown(ty));
                }
                tcx.layout_raw(param_env.and(normalized))?
            }

            ty::UnnormalizedProjection(..) | ty::GeneratorWitness(..) | ty::Infer(_) => {
                bug!("LayoutDetails::compute: unexpected type `{}`", ty)
            }

            ty::Param(_) | ty::Error => {
                return Err(LayoutError::Unknown(ty));
            }
        })
    }
    /// This is invoked by the `layout_raw` query to record the final
    /// layout of each type.
    fn record_layout_for_printing(self, layout: TyLayout<'tcx>) {
        // If we are running with `-Zprint-type-sizes`, record layouts for
        // dumping later. Ignore layouts that are done with non-empty
        // environments or non-monomorphic layouts, as the user only wants
        // to see the stuff resulting from the final codegen session.
        if !self.tcx.sess.opts.debugging_opts.print_type_sizes ||
           layout.ty.has_param_types() ||
           layout.ty.has_self_ty() ||
           !self.param_env.caller_bounds.is_empty()
        {
            return;
        }

        self.record_layout_for_printing_outlined(layout)
    }
    fn record_layout_for_printing_outlined(self, layout: TyLayout<'tcx>) {
        // (delay format until we actually need it)
        let record = |kind, packed, opt_discr_size, variants| {
            let type_desc = format!("{:?}", layout.ty);
            self.tcx.sess.code_stats.borrow_mut().record_type_size(kind,
                                                                   type_desc,
                                                                   layout.align,
                                                                   layout.size,
                                                                   packed,
                                                                   opt_discr_size,
                                                                   variants);
        };

        let adt_def = match layout.ty.sty {
            ty::Adt(ref adt_def, _) => {
                debug!("print-type-size t: `{:?}` process adt", layout.ty);
                adt_def
            }

            ty::Closure(..) => {
                debug!("print-type-size t: `{:?}` record closure", layout.ty);
                record(DataTypeKind::Closure, false, None, vec![]);
                return;
            }

            _ => {
                debug!("print-type-size t: `{:?}` skip non-nominal", layout.ty);
                return;
            }
        };
        let adt_kind = adt_def.adt_kind();
        let adt_packed = adt_def.repr.packed();

        let build_variant_info = |n: Option<ast::Name>,
                                  flds: &[ast::Name],
                                  layout: TyLayout<'tcx>| {
            let mut min_size = Size::ZERO;
            let field_info: Vec<_> = flds.iter().enumerate().map(|(i, &name)| {
                match layout.field(self, i) {
                    Err(err) => {
                        bug!("no layout found for field {}: `{:?}`", name, err);
                    }
                    Ok(field_layout) => {
                        let offset = layout.fields.offset(i);
                        let field_end = offset + field_layout.size;
                        if min_size < field_end {
                            min_size = field_end;
                        }
                        session::FieldInfo {
                            name: name.to_string(),
                            offset: offset.bytes(),
                            size: field_layout.size.bytes(),
                            align: field_layout.align.abi(),
                        }
                    }
                }
            }).collect();

            session::VariantInfo {
                name: n.map(|n| n.to_string()),
                kind: if layout.is_unsized() {
                    session::SizeKind::Min
                } else {
                    session::SizeKind::Exact
                },
                align: layout.align.abi(),
                size: if min_size.bytes() == 0 {
                    layout.size.bytes()
                } else {
                    min_size.bytes()
                },
                fields: field_info,
            }
        };
        match layout.variants {
            Variants::Single { index } => {
                debug!("print-type-size `{:#?}` variant {}",
                       layout, adt_def.variants[index].name);
                if !adt_def.variants.is_empty() {
                    let variant_def = &adt_def.variants[index];
                    let fields: Vec<_> =
                        variant_def.fields.iter().map(|f| f.ident.name).collect();
                    record(adt_kind.into(),
                           adt_packed,
                           None,
                           vec![build_variant_info(Some(variant_def.name),
                                                   &fields,
                                                   layout)]);
                } else {
                    // (This case arises for *empty* enums; so give it
                    // zero variants.)
                    record(adt_kind.into(), adt_packed, None, vec![]);
                }
            }
            Variants::NicheFilling { .. } |
            Variants::Tagged { .. } => {
                debug!("print-type-size `{:#?}` adt general variants def {}",
                       layout.ty, adt_def.variants.len());
                let variant_infos: Vec<_> =
                    adt_def.variants.iter().enumerate().map(|(i, variant_def)| {
                        let fields: Vec<_> =
                            variant_def.fields.iter().map(|f| f.ident.name).collect();
                        build_variant_info(Some(variant_def.name),
                                           &fields,
                                           layout.for_variant(self, i))
                    }).collect();
                record(adt_kind.into(), adt_packed, match layout.variants {
                    Variants::Tagged { ref tag, .. } => Some(tag.value.size(self)),
                    _ => None
                }, variant_infos);
            }
        }
    }
}
/// Type size "skeleton", i.e. the only information determining a type's size.
/// While this is conservative (aside from constant sizes, only pointers,
/// newtypes thereof and null pointer optimized enums are allowed), it is
/// enough to statically check common use cases of transmute.
#[derive(Copy, Clone, Debug)]
pub enum SizeSkeleton<'tcx> {
    /// Any statically computable Layout.
    Known(Size),

    /// A potentially-fat pointer.
    Pointer {
        /// If true, this pointer is never null.
        non_zero: bool,
        /// The type which determines the unsized metadata, if any,
        /// of this pointer. Either a type parameter or a projection
        /// depending on one, with regions erased.
        tail: Ty<'tcx>
    }
}
impl<'a, 'tcx> SizeSkeleton<'tcx> {
    pub fn compute(ty: Ty<'tcx>,
                   tcx: TyCtxt<'a, 'tcx, 'tcx>,
                   param_env: ty::ParamEnv<'tcx>)
                   -> Result<SizeSkeleton<'tcx>, LayoutError<'tcx>> {
        debug_assert!(!ty.has_infer_types());

        // First try computing a static layout.
        let err = match tcx.layout_of(param_env.and(ty)) {
            Ok(layout) => {
                return Ok(SizeSkeleton::Known(layout.size));
            }
            Err(err) => err
        };

        match ty.sty {
            ty::Ref(_, pointee, _) |
            ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
                let non_zero = !ty.is_unsafe_ptr();
                let tail = tcx.struct_tail(pointee);
                match tail.sty {
                    ty::Param(_) | ty::Projection(_) => {
                        debug_assert!(tail.has_param_types() || tail.has_self_ty());
                        Ok(SizeSkeleton::Pointer {
                            non_zero,
                            tail: tcx.erase_regions(&tail)
                        })
                    }
                    _ => {
                        bug!("SizeSkeleton::compute({}): layout errored ({}), yet \
                              tail `{}` is not a type parameter or a projection",
                             ty, err, tail)
                    }
                }
            }
            ty::Adt(def, substs) => {
                // Only newtypes and enums w/ nullable pointer optimization.
                if def.is_union() || def.variants.is_empty() || def.variants.len() > 2 {
                    return Err(err);
                }

                // Get a zero-sized variant or a pointer newtype.
                let zero_or_ptr_variant = |i: usize| {
                    let fields = def.variants[i].fields.iter().map(|field| {
                        SizeSkeleton::compute(field.ty(tcx, substs), tcx, param_env)
                    });
                    let mut ptr = None;
                    for field in fields {
                        let field = field?;
                        match field {
                            SizeSkeleton::Known(size) => {
                                if size.bytes() > 0 {
                                    return Err(err);
                                }
                            }
                            SizeSkeleton::Pointer {..} => {
                                if ptr.is_some() {
                                    return Err(err);
                                }
                                ptr = Some(field);
                            }
                        }
                    }
                    Ok(ptr)
                };
                let v0 = zero_or_ptr_variant(0)?;
                // Newtype.
                if def.variants.len() == 1 {
                    if let Some(SizeSkeleton::Pointer { non_zero, tail }) = v0 {
                        return Ok(SizeSkeleton::Pointer {
                            non_zero: non_zero || match tcx.layout_scalar_valid_range(def.did) {
                                (Bound::Included(start), Bound::Unbounded) => start > 0,
                                (Bound::Included(start), Bound::Included(end)) =>
                                    0 < start && start < end,
                                _ => false,
                            },
                            tail,
                        });
                    } else {
                        return Err(err);
                    }
                }

                let v1 = zero_or_ptr_variant(1)?;
                // Nullable pointer enum optimization.
                match (v0, v1) {
                    (Some(SizeSkeleton::Pointer { non_zero: true, tail }), None) |
                    (None, Some(SizeSkeleton::Pointer { non_zero: true, tail })) => {
                        Ok(SizeSkeleton::Pointer {
                            non_zero: false,
                            tail,
                        })
                    }
                    _ => Err(err)
                }
            }

            ty::Projection(_) | ty::Opaque(..) => {
                let normalized = tcx.normalize_erasing_regions(param_env, ty);
                if ty == normalized {
                    Err(err)
                } else {
                    SizeSkeleton::compute(normalized, tcx, param_env)
                }
            }

            _ => Err(err)
        }
    }
    pub fn same_size(self, other: SizeSkeleton<'_>) -> bool {
        match (self, other) {
            (SizeSkeleton::Known(a), SizeSkeleton::Known(b)) => a == b,
            (SizeSkeleton::Pointer { tail: a, .. },
             SizeSkeleton::Pointer { tail: b, .. }) => a == b,
            _ => false
        }
    }
}
pub trait HasTyCtxt<'tcx>: HasDataLayout {
    fn tcx<'a>(&'a self) -> TyCtxt<'a, 'tcx, 'tcx>;
}

impl<'a, 'gcx, 'tcx> HasDataLayout for TyCtxt<'a, 'gcx, 'tcx> {
    fn data_layout(&self) -> &TargetDataLayout {
        &self.data_layout
    }
}

impl<'a, 'gcx, 'tcx> HasTyCtxt<'gcx> for TyCtxt<'a, 'gcx, 'tcx> {
    fn tcx<'b>(&'b self) -> TyCtxt<'b, 'gcx, 'gcx> {
        self.global_tcx()
    }
}

impl<'tcx, T: HasDataLayout> HasDataLayout for LayoutCx<'tcx, T> {
    fn data_layout(&self) -> &TargetDataLayout {
        self.tcx.data_layout()
    }
}

impl<'gcx, 'tcx, T: HasTyCtxt<'gcx>> HasTyCtxt<'gcx> for LayoutCx<'tcx, T> {
    fn tcx<'b>(&'b self) -> TyCtxt<'b, 'gcx, 'gcx> {
        self.tcx.tcx()
    }
}
pub trait MaybeResult<T> {
    fn from_ok(x: T) -> Self;
    fn map_same<F: FnOnce(T) -> T>(self, f: F) -> Self;
}

impl<T> MaybeResult<T> for T {
    fn from_ok(x: T) -> Self {
        x
    }
    fn map_same<F: FnOnce(T) -> T>(self, f: F) -> Self {
        f(self)
    }
}

impl<T, E> MaybeResult<T> for Result<T, E> {
    fn from_ok(x: T) -> Self {
        Ok(x)
    }
    fn map_same<F: FnOnce(T) -> T>(self, f: F) -> Self {
        self.map(f)
    }
}

pub type TyLayout<'tcx> = ::rustc_target::abi::TyLayout<'tcx, Ty<'tcx>>;
impl<'a, 'tcx> LayoutOf for LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> {
    type Ty = Ty<'tcx>;
    type TyLayout = Result<TyLayout<'tcx>, LayoutError<'tcx>>;

    /// Computes the layout of a type. Note that this implicitly
    /// executes in "reveal all" mode.
    fn layout_of(self, ty: Ty<'tcx>) -> Self::TyLayout {
        let param_env = self.param_env.with_reveal_all();
        let ty = self.tcx.normalize_erasing_regions(param_env, ty);
        let details = self.tcx.layout_raw(param_env.and(ty))?;
        let layout = TyLayout {
            ty,
            details
        };

        // NB: This recording is normally disabled; when enabled, it
        // can however trigger recursive invocations of `layout_of`.
        // Therefore, we execute it *after* the main query has
        // completed, to avoid problems around recursive structures
        // and the like. (Admittedly, I wasn't able to reproduce a problem
        // here, but it seems like the right thing to do. -nmatsakis)
        self.record_layout_for_printing(layout);

        Ok(layout)
    }
}
impl<'a, 'tcx> LayoutOf for LayoutCx<'tcx, ty::query::TyCtxtAt<'a, 'tcx, 'tcx>> {
    type Ty = Ty<'tcx>;
    type TyLayout = Result<TyLayout<'tcx>, LayoutError<'tcx>>;

    /// Computes the layout of a type. Note that this implicitly
    /// executes in "reveal all" mode.
    fn layout_of(self, ty: Ty<'tcx>) -> Self::TyLayout {
        let param_env = self.param_env.with_reveal_all();
        let ty = self.tcx.normalize_erasing_regions(param_env, ty);
        let details = self.tcx.layout_raw(param_env.and(ty))?;
        let layout = TyLayout {
            ty,
            details
        };

        // NB: This recording is normally disabled; when enabled, it
        // can however trigger recursive invocations of `layout_of`.
        // Therefore, we execute it *after* the main query has
        // completed, to avoid problems around recursive structures
        // and the like. (Admittedly, I wasn't able to reproduce a problem
        // here, but it seems like the right thing to do. -nmatsakis)
        let cx = LayoutCx {
            tcx: *self.tcx,
            param_env: self.param_env
        };
        cx.record_layout_for_printing(layout);

        Ok(layout)
    }
}
// Helper (inherent) `layout_of` methods to avoid pushing `LayoutCx` to users.
impl TyCtxt<'a, 'tcx, '_> {
    /// Computes the layout of a type. Note that this implicitly
    /// executes in "reveal all" mode.
    pub fn layout_of(self, param_env_and_ty: ty::ParamEnvAnd<'tcx, Ty<'tcx>>)
                     -> Result<TyLayout<'tcx>, LayoutError<'tcx>> {
        let cx = LayoutCx {
            tcx: self.global_tcx(),
            param_env: param_env_and_ty.param_env
        };
        cx.layout_of(param_env_and_ty.value)
    }
}
impl ty::query::TyCtxtAt<'a, 'tcx, '_> {
    /// Computes the layout of a type. Note that this implicitly
    /// executes in "reveal all" mode.
    pub fn layout_of(self, param_env_and_ty: ty::ParamEnvAnd<'tcx, Ty<'tcx>>)
                     -> Result<TyLayout<'tcx>, LayoutError<'tcx>> {
        let cx = LayoutCx {
            tcx: self.global_tcx().at(self.span),
            param_env: param_env_and_ty.param_env
        };
        cx.layout_of(param_env_and_ty.value)
    }
}
impl<'a, 'tcx, C> TyLayoutMethods<'tcx, C> for Ty<'tcx>
    where C: LayoutOf<Ty = Ty<'tcx>> + HasTyCtxt<'tcx>,
          C::TyLayout: MaybeResult<TyLayout<'tcx>>
{
    fn for_variant(this: TyLayout<'tcx>, cx: C, variant_index: usize) -> TyLayout<'tcx> {
        let details = match this.variants {
            Variants::Single { index } if index == variant_index => this.details,

            Variants::Single { index } => {
                // Deny calling for_variant more than once for non-Single enums.
                cx.layout_of(this.ty).map_same(|layout| {
                    assert_eq!(layout.variants, Variants::Single { index });
                    layout
                });

                let fields = match this.ty.sty {
                    ty::Adt(def, _) => def.variants[variant_index].fields.len(),
                    _ => bug!()
                };
                let tcx = cx.tcx();
                tcx.intern_layout(LayoutDetails {
                    variants: Variants::Single { index: variant_index },
                    fields: FieldPlacement::Union(fields),
                    abi: Abi::Uninhabited,
                    align: tcx.data_layout.i8_align,
                    size: Size::ZERO
                })
            }

            Variants::NicheFilling { ref variants, .. } |
            Variants::Tagged { ref variants, .. } => {
                &variants[variant_index]
            }
        };

        assert_eq!(details.variants, Variants::Single { index: variant_index });

        TyLayout {
            ty: this.ty,
            details
        }
    }
    fn field(this: TyLayout<'tcx>, cx: C, i: usize) -> C::TyLayout {
        let tcx = cx.tcx();
        cx.layout_of(match this.ty.sty {
            ty::Bool |
            ty::Char |
            ty::Int(_) |
            ty::Uint(_) |
            ty::Float(_) |
            ty::FnPtr(_) |
            ty::Never |
            ty::FnDef(..) |
            ty::GeneratorWitness(..) |
            ty::Foreign(..) |
            ty::Dynamic(..) => {
                bug!("TyLayout::field_type({:?}): not applicable", this)
            }

            // Potentially-fat pointers.
            ty::Ref(_, pointee, _) |
            ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
                assert!(i < this.fields.count());

                // Reuse the fat *T type as its own thin pointer data field.
                // This provides information about e.g. DST struct pointees
                // (which may have no non-DST form), and will work as long
                // as the `Abi` or `FieldPlacement` is checked by users.
                if i == 0 {
                    let nil = tcx.mk_unit();
                    let ptr_ty = if this.ty.is_unsafe_ptr() {
                        tcx.mk_mut_ptr(nil)
                    } else {
                        tcx.mk_mut_ref(tcx.types.re_static, nil)
                    };
                    return cx.layout_of(ptr_ty).map_same(|mut ptr_layout| {
                        ptr_layout.ty = this.ty;
                        ptr_layout
                    });
                }
                match tcx.struct_tail(pointee).sty {
                    ty::Slice(_) |
                    ty::Str => tcx.types.usize,
                    ty::Dynamic(_, _) => {
                        tcx.mk_imm_ref(
                            tcx.types.re_static,
                            tcx.mk_array(tcx.types.usize, 3),
                        )
                        /* FIXME use actual fn pointers
                        Warning: naively computing the number of entries in the
                        vtable by counting the methods on the trait + methods on
                        all parent traits does not work, because some methods can
                        be not object safe and thus excluded from the vtable.
                        Increase this counter if you tried to implement this but
                        failed to do it without duplicating a lot of code from
                        other places in the compiler: 2
                        tcx.mk_tup(&[
                            tcx.mk_array(tcx.types.usize, 3),
                            tcx.mk_array(Option<fn()>),
                        ])
                        */
                    }
                    _ => bug!("TyLayout::field_type({:?}): not applicable", this)
                }
            }
            // Arrays and slices.
            ty::Array(element, _) |
            ty::Slice(element) => element,
            ty::Str => tcx.types.u8,

            // Tuples, generators and closures.
            ty::Closure(def_id, ref substs) => {
                substs.upvar_tys(def_id, tcx).nth(i).unwrap()
            }

            ty::Generator(def_id, ref substs, _) => {
                substs.field_tys(def_id, tcx).nth(i).unwrap()
            }

            ty::Tuple(tys) => tys[i],

            // SIMD vector types.
            ty::Adt(def, ..) if def.repr.simd() => {
                this.ty.simd_type(tcx)
            }
            // ADT types.
            ty::Adt(def, substs) => {
                match this.variants {
                    Variants::Single { index } => {
                        def.variants[index].fields[i].ty(tcx, substs)
                    }

                    // Discriminant field for enums (where applicable).
                    Variants::Tagged { tag: ref discr, .. } |
                    Variants::NicheFilling { niche: ref discr, .. } => {
                        assert_eq!(i, 0);
                        let layout = LayoutDetails::scalar(tcx, discr.clone());
                        return MaybeResult::from_ok(TyLayout {
                            details: tcx.intern_layout(layout),
                            ty: discr.value.to_ty(tcx)
                        });
                    }
                }
            }

            ty::Projection(_) | ty::UnnormalizedProjection(..) |
            ty::Opaque(..) | ty::Param(_) | ty::Infer(_) | ty::Error => {
                bug!("TyLayout::field_type: unexpected type `{}`", this.ty)
            }
        })
    }
}
struct Niche {
    offset: Size,
    scalar: Scalar,
    available: u128,
}

impl Niche {
    fn reserve<'a, 'tcx>(
        &self,
        cx: LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>>,
        count: u128,
    ) -> Option<(u128, Scalar)> {
        if count > self.available {
            return None;
        }
        let Scalar { value, valid_range: ref v } = self.scalar;
        let bits = value.size(cx).bits();
        assert!(bits <= 128);
        let max_value = !0u128 >> (128 - bits);
        let start = v.end().wrapping_add(1) & max_value;
        let end = v.end().wrapping_add(count) & max_value;
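        // Illustrative example (editor's note): for a bool niche
        // (I8, valid_range 0..=1), reserving count = 1 computes
        // start = 2 and end = 2, widening the valid range to 0..=2.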
        Some((start, Scalar { value, valid_range: *v.start()..=end }))
    }
}
impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> {
    /// Find the offset of a niche leaf field, starting from
    /// the given type and recursing through aggregates.
    // FIXME(eddyb) traverse already optimized enums.
    fn find_niche(self, layout: TyLayout<'tcx>) -> Result<Option<Niche>, LayoutError<'tcx>> {
        let scalar_niche = |scalar: &Scalar, offset| {
            let Scalar { value, valid_range: ref v } = *scalar;

            let bits = value.size(self).bits();
            assert!(bits <= 128);
            let max_value = !0u128 >> (128 - bits);

            // Find out how many values are outside the valid range.
            let available = if v.start() <= v.end() {
                v.start() + (max_value - v.end())
            } else {
                v.start() - v.end() - 1
            };
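            // e.g. a bool scalar (I8, valid_range 0..=1): max_value = 255,
            // so available = 0 + (255 - 1) = 254 niche values.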
            // Give up if there is no niche value available.
            if available == 0 {
                return None;
            }

            Some(Niche { offset, scalar: scalar.clone(), available })
        };
        // Local variables which live across yields are stored
        // in the generator type as fields. These may be uninitialized
        // so we don't look for niches there.
        if let ty::Generator(..) = layout.ty.sty {
            return Ok(None);
        }

        match layout.abi {
            Abi::Scalar(ref scalar) => {
                return Ok(scalar_niche(scalar, Size::ZERO));
            }
            Abi::ScalarPair(ref a, ref b) => {
                // HACK(nox): We iter on `b` and then `a` because `max_by_key`
                // returns the last maximum.
                let niche = iter::once((b, a.value.size(self).abi_align(b.value.align(self))))
                    .chain(iter::once((a, Size::ZERO)))
                    .filter_map(|(scalar, offset)| scalar_niche(scalar, offset))
                    .max_by_key(|niche| niche.available);
                return Ok(niche);
            }
            Abi::Vector { ref element, .. } => {
                return Ok(scalar_niche(element, Size::ZERO));
            }
            _ => {}
        }
        // Perhaps one of the fields is non-zero, let's recurse and find out.
        if let FieldPlacement::Union(_) = layout.fields {
            // Only Rust enums have safe-to-inspect fields
            // (a discriminant), other unions are unsafe.
            if let Variants::Single { .. } = layout.variants {
                return Ok(None);
            }
        }
        if let FieldPlacement::Array { .. } = layout.fields {
            if layout.fields.count() > 0 {
                return self.find_niche(layout.field(self, 0)?);
            } else {
                return Ok(None);
            }
        }
        let mut niche = None;
        let mut available = 0;
        for i in 0..layout.fields.count() {
            if let Some(mut c) = self.find_niche(layout.field(self, i)?)? {
                if c.available > available {
                    available = c.available;
                    c.offset += layout.fields.offset(i);
                    niche = Some(c);
                }
            }
        }
        Ok(niche)
    }
}
impl<'a> HashStable<StableHashingContext<'a>> for Variants {
    fn hash_stable<W: StableHasherResult>(&self,
                                          hcx: &mut StableHashingContext<'a>,
                                          hasher: &mut StableHasher<W>) {
        use ty::layout::Variants::*;
        mem::discriminant(self).hash_stable(hcx, hasher);

        match *self {
            Single { index } => {
                index.hash_stable(hcx, hasher);
            }
            Tagged {
                ref tag,
                ref variants,
            } => {
                tag.hash_stable(hcx, hasher);
                variants.hash_stable(hcx, hasher);
            }
            NicheFilling {
                dataful_variant,
                ref niche_variants,
                ref niche,
                niche_start,
                ref variants,
            } => {
                dataful_variant.hash_stable(hcx, hasher);
                niche_variants.start().hash_stable(hcx, hasher);
                niche_variants.end().hash_stable(hcx, hasher);
                niche.hash_stable(hcx, hasher);
                niche_start.hash_stable(hcx, hasher);
                variants.hash_stable(hcx, hasher);
            }
        }
    }
}
impl<'a> HashStable<StableHashingContext<'a>> for FieldPlacement {
    fn hash_stable<W: StableHasherResult>(&self,
                                          hcx: &mut StableHashingContext<'a>,
                                          hasher: &mut StableHasher<W>) {
        use ty::layout::FieldPlacement::*;
        mem::discriminant(self).hash_stable(hcx, hasher);

        match *self {
            Union(count) => {
                count.hash_stable(hcx, hasher);
            }
            Array { count, stride } => {
                count.hash_stable(hcx, hasher);
                stride.hash_stable(hcx, hasher);
            }
            Arbitrary { ref offsets, ref memory_index } => {
                offsets.hash_stable(hcx, hasher);
                memory_index.hash_stable(hcx, hasher);
            }
        }
    }
}
impl<'a> HashStable<StableHashingContext<'a>> for Abi {
    fn hash_stable<W: StableHasherResult>(&self,
                                          hcx: &mut StableHashingContext<'a>,
                                          hasher: &mut StableHasher<W>) {
        use ty::layout::Abi::*;
        mem::discriminant(self).hash_stable(hcx, hasher);

        match *self {
            Uninhabited => {}
            Scalar(ref value) => {
                value.hash_stable(hcx, hasher);
            }
            ScalarPair(ref a, ref b) => {
                a.hash_stable(hcx, hasher);
                b.hash_stable(hcx, hasher);
            }
            Vector { ref element, count } => {
                element.hash_stable(hcx, hasher);
                count.hash_stable(hcx, hasher);
            }
            Aggregate { sized } => {
                sized.hash_stable(hcx, hasher);
            }
        }
    }
}
impl<'a> HashStable<StableHashingContext<'a>> for Scalar {
    fn hash_stable<W: StableHasherResult>(&self,
                                          hcx: &mut StableHashingContext<'a>,
                                          hasher: &mut StableHasher<W>) {
        let Scalar { value, ref valid_range } = *self;
        value.hash_stable(hcx, hasher);
        valid_range.start().hash_stable(hcx, hasher);
        valid_range.end().hash_stable(hcx, hasher);
    }
}
impl_stable_hash_for!(struct ::ty::layout::LayoutDetails {
    variants,
    fields,
    abi,
    align,
    size
});

impl_stable_hash_for!(enum ::ty::layout::Integer {
    I8,
    I16,
    I32,
    I64,
    I128
});

impl_stable_hash_for!(enum ::ty::layout::Primitive {
    Int(integer, signed),
    Float(fty),
    Pointer
});
impl<'gcx> HashStable<StableHashingContext<'gcx>> for Align {
    fn hash_stable<W: StableHasherResult>(&self,
                                          hcx: &mut StableHashingContext<'gcx>,
                                          hasher: &mut StableHasher<W>) {
        self.abi().hash_stable(hcx, hasher);
        self.pref().hash_stable(hcx, hasher);
    }
}

impl<'gcx> HashStable<StableHashingContext<'gcx>> for Size {
    fn hash_stable<W: StableHasherResult>(&self,
                                          hcx: &mut StableHashingContext<'gcx>,
                                          hasher: &mut StableHasher<W>) {
        self.bytes().hash_stable(hcx, hasher);
    }
}
impl<'a, 'gcx> HashStable<StableHashingContext<'a>> for LayoutError<'gcx>
{
    fn hash_stable<W: StableHasherResult>(&self,
                                          hcx: &mut StableHashingContext<'a>,
                                          hasher: &mut StableHasher<W>) {
        use ty::layout::LayoutError::*;
        mem::discriminant(self).hash_stable(hcx, hasher);

        match *self {
            Unknown(t) |
            SizeOverflow(t) => t.hash_stable(hcx, hasher)
        }
    }
}