// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use session::{self, DataTypeKind};
use ty::{self, Ty, TyCtxt, TypeFoldable, ReprOptions};

use syntax::ast::{self, FloatTy, IntTy, UintTy};
use syntax::attr;
use syntax_pos::DUMMY_SP;

use std::cmp;
use std::fmt;
use std::i128;
use std::iter;
use std::mem;
use std::ops::Bound;

use ich::StableHashingContext;
use rustc_data_structures::indexed_vec::{IndexVec, Idx};
use rustc_data_structures::stable_hasher::{HashStable, StableHasher,
                                           StableHasherResult};

pub use rustc_target::abi::*;
pub trait IntegerExt {
    fn to_ty<'a, 'tcx>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, signed: bool) -> Ty<'tcx>;
    fn from_attr<C: HasDataLayout>(cx: &C, ity: attr::IntType) -> Integer;
    fn repr_discr<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
                            ty: Ty<'tcx>,
                            repr: &ReprOptions,
                            min: i128,
                            max: i128)
                            -> (Integer, bool);
}
impl IntegerExt for Integer {
    fn to_ty<'a, 'tcx>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, signed: bool) -> Ty<'tcx> {
        match (*self, signed) {
            (I8, false) => tcx.types.u8,
            (I16, false) => tcx.types.u16,
            (I32, false) => tcx.types.u32,
            (I64, false) => tcx.types.u64,
            (I128, false) => tcx.types.u128,
            (I8, true) => tcx.types.i8,
            (I16, true) => tcx.types.i16,
            (I32, true) => tcx.types.i32,
            (I64, true) => tcx.types.i64,
            (I128, true) => tcx.types.i128,
        }
    }
    /// Get the Integer type from an attr::IntType.
    fn from_attr<C: HasDataLayout>(cx: &C, ity: attr::IntType) -> Integer {
        let dl = cx.data_layout();

        match ity {
            attr::SignedInt(IntTy::I8) | attr::UnsignedInt(UintTy::U8) => I8,
            attr::SignedInt(IntTy::I16) | attr::UnsignedInt(UintTy::U16) => I16,
            attr::SignedInt(IntTy::I32) | attr::UnsignedInt(UintTy::U32) => I32,
            attr::SignedInt(IntTy::I64) | attr::UnsignedInt(UintTy::U64) => I64,
            attr::SignedInt(IntTy::I128) | attr::UnsignedInt(UintTy::U128) => I128,
            attr::SignedInt(IntTy::Isize) | attr::UnsignedInt(UintTy::Usize) => {
                dl.ptr_sized_integer()
            }
        }
    }
    /// Find the appropriate Integer type and signedness for the given
    /// signed discriminant range and `#[repr]` attribute.
    /// N.B.: u128 values above i128::MAX will be treated as signed, but
    /// that shouldn't affect anything, other than maybe debuginfo.
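    /// For example, a discriminant range of -1..=100 needs at least an `I8`
    /// with `signed == true`; the same enum annotated with `#[repr(u8)]`
    /// would trip the "`#[repr]` hint too small" check below, since -1 does
    /// not fit in any unsigned type.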
    fn repr_discr<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
                            ty: Ty<'tcx>,
                            repr: &ReprOptions,
                            min: i128,
                            max: i128)
                            -> (Integer, bool) {
        // Theoretically, negative values could be larger in unsigned representation
        // than the unsigned representation of the signed minimum. However, if there
        // are any negative values, the only valid unsigned representation is u128,
        // which can fit all i128 values, so the result remains unaffected.
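        // (E.g. min == -1 casts to u128::MAX, forcing unsigned_fit to I128,
        // which indeed covers every i128 value.)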
        let unsigned_fit = Integer::fit_unsigned(cmp::max(min as u128, max as u128));
        let signed_fit = cmp::max(Integer::fit_signed(min), Integer::fit_signed(max));
        let mut min_from_extern = None;
        let min_default = I8;

        if let Some(ity) = repr.int {
            let discr = Integer::from_attr(&tcx, ity);
            let fit = if ity.is_signed() { signed_fit } else { unsigned_fit };
            if discr < fit {
                bug!("Integer::repr_discr: `#[repr]` hint too small for \
                      discriminant range of enum `{}`", ty)
            }
            return (discr, ity.is_signed());
        }
        if repr.c() {
            match &tcx.sess.target.target.arch[..] {
                // WARNING: the ARM EABI has two variants; the one corresponding
                // to `at_least == I32` appears to be used on Linux and NetBSD,
                // but some systems may use the variant corresponding to no
                // lower bound. However, we don't run on those yet...?
                "arm" => min_from_extern = Some(I32),
                _ => min_from_extern = Some(I32),
            }
        }
        let at_least = min_from_extern.unwrap_or(min_default);

        // If there are no negative values, we can use the unsigned fit.
        if min >= 0 {
            (cmp::max(unsigned_fit, at_least), false)
        } else {
            (cmp::max(signed_fit, at_least), true)
        }
    }
}
pub trait PrimitiveExt {
    fn to_ty<'a, 'tcx>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>) -> Ty<'tcx>;
}

impl PrimitiveExt for Primitive {
    fn to_ty<'a, 'tcx>(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>) -> Ty<'tcx> {
        match *self {
            Int(i, signed) => i.to_ty(tcx, signed),
            Float(FloatTy::F32) => tcx.types.f32,
            Float(FloatTy::F64) => tcx.types.f64,
            Pointer => tcx.mk_mut_ptr(tcx.mk_unit()),
        }
    }
}
/// The first half of a fat pointer.
///
/// - For a trait object, this is the address of the box.
/// - For a slice, this is the base address.
pub const FAT_PTR_ADDR: usize = 0;

/// The second half of a fat pointer.
///
/// - For a trait object, this is the address of the vtable.
/// - For a slice, this is the length.
pub const FAT_PTR_EXTRA: usize = 1;
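// (E.g. a `&[u8]` is laid out as the pair (data pointer, length): field
// FAT_PTR_ADDR is the element pointer, field FAT_PTR_EXTRA the length.)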
#[derive(Copy, Clone, Debug, RustcEncodable, RustcDecodable)]
pub enum LayoutError<'tcx> {
    Unknown(Ty<'tcx>),
    SizeOverflow(Ty<'tcx>)
}
impl<'tcx> fmt::Display for LayoutError<'tcx> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match *self {
            LayoutError::Unknown(ty) => {
                write!(f, "the type `{:?}` has an unknown layout", ty)
            }
            LayoutError::SizeOverflow(ty) => {
                write!(f, "the type `{:?}` is too big for the current architecture", ty)
            }
        }
    }
}
fn layout_raw<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
                        query: ty::ParamEnvAnd<'tcx, Ty<'tcx>>)
                        -> Result<&'tcx LayoutDetails, LayoutError<'tcx>>
{
    ty::tls::with_related_context(tcx, move |icx| {
        let rec_limit = *tcx.sess.recursion_limit.get();
        let (param_env, ty) = query.into_parts();

        if icx.layout_depth > rec_limit {
            tcx.sess.fatal(
                &format!("overflow representing the type `{}`", ty));
        }

        // Update the ImplicitCtxt to increase the layout_depth
        let icx = ty::tls::ImplicitCtxt {
            layout_depth: icx.layout_depth + 1,
            ..icx.clone()
        };

        ty::tls::enter_context(&icx, |_| {
            let cx = LayoutCx { tcx, param_env };
            cx.layout_raw_uncached(ty)
        })
    })
}
pub fn provide(providers: &mut ty::query::Providers<'_>) {
    *providers = ty::query::Providers {
        layout_raw,
        ..*providers
    };
}
pub struct LayoutCx<'tcx, C> {
    pub tcx: C,
    pub param_env: ty::ParamEnv<'tcx>
}
impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> {
    fn layout_raw_uncached(&self, ty: Ty<'tcx>)
                           -> Result<&'tcx LayoutDetails, LayoutError<'tcx>> {
        let tcx = self.tcx;
        let param_env = self.param_env;
        let dl = self.data_layout();
        let scalar_unit = |value: Primitive| {
            let bits = value.size(dl).bits();
            assert!(bits <= 128);
            Scalar {
                value,
                valid_range: 0..=(!0 >> (128 - bits))
            }
        };
        let scalar = |value: Primitive| {
            tcx.intern_layout(LayoutDetails::scalar(self, scalar_unit(value)))
        };
        let scalar_pair = |a: Scalar, b: Scalar| {
            let align = a.value.align(dl).max(b.value.align(dl)).max(dl.aggregate_align);
            let b_offset = a.value.size(dl).abi_align(b.value.align(dl));
            let size = (b_offset + b.value.size(dl)).abi_align(align);
            LayoutDetails {
                variants: Variants::Single { index: VariantIdx::new(0) },
                fields: FieldPlacement::Arbitrary {
                    offsets: vec![Size::ZERO, b_offset],
                    memory_index: vec![0, 1]
                },
                abi: Abi::ScalarPair(a, b),
                align,
                size
            }
        };
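        // (E.g. pairing an `I8` scalar with an `I32` scalar on a target where
        // i32 is 4-byte-aligned gives b_offset == 4 and size == 8.)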
        #[derive(Copy, Clone, Debug)]
        enum StructKind {
            /// A tuple, closure, or univariant which cannot be coerced to unsized.
            AlwaysSized,
            /// A univariant, the last field of which may be coerced to unsized.
            MaybeUnsized,
            /// A univariant, but with a prefix of an arbitrary size & alignment (e.g. enum tag).
            Prefixed(Size, Align),
        }
        let univariant_uninterned = |fields: &[TyLayout<'_>], repr: &ReprOptions, kind| {
            let packed = repr.packed();
            if packed && repr.align > 0 {
                bug!("struct cannot be packed and aligned");
            }

            let pack = {
                let pack = repr.pack as u64;
                Align::from_bytes(pack, pack).unwrap()
            };

            let mut align = if packed {
                dl.i8_align
            } else {
                dl.aggregate_align
            };
            let mut sized = true;
            let mut offsets = vec![Size::ZERO; fields.len()];
            let mut inverse_memory_index: Vec<u32> = (0..fields.len() as u32).collect();

            let mut optimize = !repr.inhibit_struct_field_reordering_opt();
            if let StructKind::Prefixed(_, align) = kind {
                optimize &= align.abi() == 1;
            }
            if optimize {
                let end = if let StructKind::MaybeUnsized = kind {
                    fields.len() - 1
                } else {
                    fields.len()
                };
                let optimizing = &mut inverse_memory_index[..end];
                let field_align = |f: &TyLayout<'_>| {
                    if packed { f.align.min(pack).abi() } else { f.align.abi() }
                };
                match kind {
                    StructKind::AlwaysSized |
                    StructKind::MaybeUnsized => {
                        optimizing.sort_by_key(|&x| {
                            // Place ZSTs first to avoid "interesting offsets",
                            // especially with only one or two non-ZST fields.
                            let f = &fields[x as usize];
                            (!f.is_zst(), cmp::Reverse(field_align(f)))
                        });
                    }
                    StructKind::Prefixed(..) => {
                        optimizing.sort_by_key(|&x| field_align(&fields[x as usize]));
                    }
                }
            }
            // inverse_memory_index holds field indices by increasing memory offset.
            // That is, if field 5 has offset 0, the first element of inverse_memory_index is 5.
            // We now write field offsets to the corresponding offset slot;
            // field 5 with offset 0 puts 0 in offsets[5].
            // At the bottom of this function, we use inverse_memory_index to produce memory_index.
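            // (E.g. three fields placed in memory order [2, 0, 1] give
            // inverse_memory_index == [2, 0, 1]; inverting it below yields
            // memory_index == [1, 2, 0], i.e. field 0 sits second in memory.)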
            let mut offset = Size::ZERO;

            if let StructKind::Prefixed(prefix_size, prefix_align) = kind {
                if packed {
                    let prefix_align = prefix_align.min(pack);
                    align = align.max(prefix_align);
                } else {
                    align = align.max(prefix_align);
                }
                offset = prefix_size.abi_align(prefix_align);
            }
            for &i in &inverse_memory_index {
                let field = fields[i as usize];
                if !sized {
                    bug!("univariant: field #{} of `{}` comes after unsized field",
                         offsets.len(), ty);
                }

                if field.is_unsized() {
                    sized = false;
                }

                // Invariant: offset < dl.obj_size_bound() <= 1<<61
                if packed {
                    let field_pack = field.align.min(pack);
                    offset = offset.abi_align(field_pack);
                    align = align.max(field_pack);
                } else {
                    offset = offset.abi_align(field.align);
                    align = align.max(field.align);
                }

                debug!("univariant offset: {:?} field: {:#?}", offset, field);
                offsets[i as usize] = offset;

                offset = offset.checked_add(field.size, dl)
                    .ok_or(LayoutError::SizeOverflow(ty))?;
            }
            if repr.align > 0 {
                let repr_align = repr.align as u64;
                align = align.max(Align::from_bytes(repr_align, repr_align).unwrap());
                debug!("univariant repr_align: {:?}", repr_align);
            }

            debug!("univariant min_size: {:?}", offset);
            let min_size = offset;
            // As stated above, inverse_memory_index holds field indices by increasing offset.
            // This makes it an already-sorted view of the offsets vec.
            // To invert it, consider:
            // if field 5 has offset 0, then inverse_memory_index[0] is 5,
            // and memory_index[5] should be 0.
            // In general, memory_index[inverse_memory_index[i]] == i.
            // Note: if we didn't optimize, inverse_memory_index is already right.
            let mut memory_index;
            if optimize {
                memory_index = vec![0; inverse_memory_index.len()];

                for i in 0..inverse_memory_index.len() {
                    memory_index[inverse_memory_index[i] as usize] = i as u32;
                }
            } else {
                memory_index = inverse_memory_index;
            }
            let size = min_size.abi_align(align);
            let mut abi = Abi::Aggregate { sized };

            // Unpack newtype ABIs and find scalar pairs.
            if sized && size.bytes() > 0 {
                // All other fields must be ZSTs, and we need them to all start at 0.
                let mut zst_offsets =
                    offsets.iter().enumerate().filter(|&(i, _)| fields[i].is_zst());
                if zst_offsets.all(|(_, o)| o.bytes() == 0) {
                    let mut non_zst_fields =
                        fields.iter().enumerate().filter(|&(_, f)| !f.is_zst());
                    match (non_zst_fields.next(), non_zst_fields.next(), non_zst_fields.next()) {
                        // We have exactly one non-ZST field.
                        (Some((i, field)), None, None) => {
                            // Field fills the struct and it has a scalar or scalar pair ABI.
                            if offsets[i].bytes() == 0 &&
                               align.abi() == field.align.abi() &&
                               size == field.size {
                                match field.abi {
                                    // For plain scalars, or vectors of them, we can't unpack
                                    // newtypes for `#[repr(C)]`, as that affects C ABIs.
                                    Abi::Scalar(_) | Abi::Vector { .. } if optimize => {
                                        abi = field.abi.clone();
                                    }
                                    // But scalar pairs are Rust-specific and get
                                    // treated as aggregates by C ABIs anyway.
                                    Abi::ScalarPair(..) => {
                                        abi = field.abi.clone();
                                    }
                                    _ => {}
                                }
                            }
                        }
                        // Two non-ZST fields, and they're both scalars.
                        (Some((i, &TyLayout {
                            details: &LayoutDetails { abi: Abi::Scalar(ref a), .. }, ..
                        })), Some((j, &TyLayout {
                            details: &LayoutDetails { abi: Abi::Scalar(ref b), .. }, ..
                        })), None) => {
                            // Order by the memory placement, not source order.
                            let ((i, a), (j, b)) = if offsets[i] < offsets[j] {
                                ((i, a), (j, b))
                            } else {
                                ((j, b), (i, a))
                            };
                            let pair = scalar_pair(a.clone(), b.clone());
                            let pair_offsets = match pair.fields {
                                FieldPlacement::Arbitrary {
                                    ref offsets,
                                    ref memory_index
                                } => {
                                    assert_eq!(memory_index, &[0, 1]);
                                    offsets
                                }
                                _ => bug!()
                            };
                            if offsets[i] == pair_offsets[0] &&
                               offsets[j] == pair_offsets[1] &&
                               align == pair.align &&
                               size == pair.size {
                                // We can use `ScalarPair` only when it matches our
                                // already computed layout (including `#[repr(C)]`).
                                abi = pair.abi.clone();
                            }
                        }
                        _ => {}
                    }
                }
            }
            if sized && fields.iter().any(|f| f.abi.is_uninhabited()) {
                abi = Abi::Uninhabited;
            }

            Ok(LayoutDetails {
                variants: Variants::Single { index: VariantIdx::new(0) },
                fields: FieldPlacement::Arbitrary {
                    offsets,
                    memory_index
                },
                abi,
                align,
                size
            })
        };
        let univariant = |fields: &[TyLayout<'_>], repr: &ReprOptions, kind| {
            Ok(tcx.intern_layout(univariant_uninterned(fields, repr, kind)?))
        };
        debug_assert!(!ty.has_infer_types());

        Ok(match ty.sty {
            // Basic scalars.
            ty::Bool => {
                tcx.intern_layout(LayoutDetails::scalar(self, Scalar {
                    value: Int(I8, false),
                    valid_range: 0..=1
                }))
            }
            ty::Char => {
                tcx.intern_layout(LayoutDetails::scalar(self, Scalar {
                    value: Int(I32, false),
                    valid_range: 0..=0x10FFFF
                }))
            }
            ty::Int(ity) => {
                scalar(Int(Integer::from_attr(dl, attr::SignedInt(ity)), true))
            }
            ty::Uint(ity) => {
                scalar(Int(Integer::from_attr(dl, attr::UnsignedInt(ity)), false))
            }
            ty::Float(fty) => scalar(Float(fty)),
            ty::FnPtr(_) => {
                let mut ptr = scalar_unit(Pointer);
                ptr.valid_range = 1..=*ptr.valid_range.end();
                tcx.intern_layout(LayoutDetails::scalar(self, ptr))
            }

            // The never type.
            ty::Never => {
                tcx.intern_layout(LayoutDetails {
                    variants: Variants::Single { index: VariantIdx::new(0) },
                    fields: FieldPlacement::Union(0),
                    abi: Abi::Uninhabited,
                    align: dl.i8_align,
                    size: Size::ZERO
                })
            }
            // Potentially-fat pointers.
            ty::Ref(_, pointee, _) |
            ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
                let mut data_ptr = scalar_unit(Pointer);
                if !ty.is_unsafe_ptr() {
                    data_ptr.valid_range = 1..=*data_ptr.valid_range.end();
                }

                let pointee = tcx.normalize_erasing_regions(param_env, pointee);
                if pointee.is_sized(tcx.at(DUMMY_SP), param_env) {
                    return Ok(tcx.intern_layout(LayoutDetails::scalar(self, data_ptr)));
                }

                let unsized_part = tcx.struct_tail(pointee);
                let metadata = match unsized_part.sty {
                    ty::Foreign(..) => {
                        return Ok(tcx.intern_layout(LayoutDetails::scalar(self, data_ptr)));
                    }
                    ty::Slice(_) | ty::Str => {
                        scalar_unit(Int(dl.ptr_sized_integer(), false))
                    }
                    ty::Dynamic(..) => {
                        let mut vtable = scalar_unit(Pointer);
                        vtable.valid_range = 1..=*vtable.valid_range.end();
                        vtable
                    }
                    _ => return Err(LayoutError::Unknown(unsized_part))
                };

                // Effectively a (ptr, meta) tuple.
                tcx.intern_layout(scalar_pair(data_ptr, metadata))
            }
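            // (E.g. `&dyn Trait` becomes the pair (non-null data pointer,
            // non-null vtable pointer); `&str` becomes (non-null data pointer,
            // usize length).)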
            // Arrays and slices.
            ty::Array(element, mut count) => {
                if count.has_projections() {
                    count = tcx.normalize_erasing_regions(param_env, count);
                    if count.has_projections() {
                        return Err(LayoutError::Unknown(ty));
                    }
                }

                let element = self.layout_of(element)?;
                let count = count.unwrap_usize(tcx);
                let size = element.size.checked_mul(count, dl)
                    .ok_or(LayoutError::SizeOverflow(ty))?;

                tcx.intern_layout(LayoutDetails {
                    variants: Variants::Single { index: VariantIdx::new(0) },
                    fields: FieldPlacement::Array {
                        stride: element.size,
                        count
                    },
                    abi: Abi::Aggregate { sized: true },
                    align: element.align,
                    size
                })
            }
            ty::Slice(element) => {
                let element = self.layout_of(element)?;
                tcx.intern_layout(LayoutDetails {
                    variants: Variants::Single { index: VariantIdx::new(0) },
                    fields: FieldPlacement::Array {
                        stride: element.size,
                        count: 0
                    },
                    abi: Abi::Aggregate { sized: false },
                    align: element.align,
                    size: Size::ZERO
                })
            }
            ty::Str => {
                tcx.intern_layout(LayoutDetails {
                    variants: Variants::Single { index: VariantIdx::new(0) },
                    fields: FieldPlacement::Array {
                        stride: Size::from_bytes(1),
                        count: 0
                    },
                    abi: Abi::Aggregate { sized: false },
                    align: dl.i8_align,
                    size: Size::ZERO
                })
            }
            // Odd unit types.
            ty::FnDef(..) => {
                univariant(&[], &ReprOptions::default(), StructKind::AlwaysSized)?
            }
            ty::Dynamic(..) | ty::Foreign(..) => {
                let mut unit = univariant_uninterned(&[], &ReprOptions::default(),
                                                     StructKind::AlwaysSized)?;
                match unit.abi {
                    Abi::Aggregate { ref mut sized } => *sized = false,
                    _ => bug!()
                }
                tcx.intern_layout(unit)
            }
            // Tuples, generators and closures.
            ty::Generator(def_id, ref substs, _) => {
                let tys = substs.field_tys(def_id, tcx);
                univariant(&tys.map(|ty| self.layout_of(ty)).collect::<Result<Vec<_>, _>>()?,
                    &ReprOptions::default(),
                    StructKind::AlwaysSized)?
            }

            ty::Closure(def_id, ref substs) => {
                let tys = substs.upvar_tys(def_id, tcx);
                univariant(&tys.map(|ty| self.layout_of(ty)).collect::<Result<Vec<_>, _>>()?,
                    &ReprOptions::default(),
                    StructKind::AlwaysSized)?
            }

            ty::Tuple(tys) => {
                let kind = if tys.len() == 0 {
                    StructKind::AlwaysSized
                } else {
                    StructKind::MaybeUnsized
                };

                univariant(&tys.iter().map(|ty| self.layout_of(ty)).collect::<Result<Vec<_>, _>>()?,
                           &ReprOptions::default(), kind)?
            }
            // SIMD vector types.
            ty::Adt(def, ..) if def.repr.simd() => {
                let element = self.layout_of(ty.simd_type(tcx))?;
                let count = ty.simd_size(tcx) as u64;
                assert!(count > 0);
                let scalar = match element.abi {
                    Abi::Scalar(ref scalar) => scalar.clone(),
                    _ => {
                        tcx.sess.fatal(&format!("monomorphising SIMD type `{}` with \
                                                 a non-machine element type `{}`",
                                                ty, element.ty));
                    }
                };
                let size = element.size.checked_mul(count, dl)
                    .ok_or(LayoutError::SizeOverflow(ty))?;
                let align = dl.vector_align(size);
                let size = size.abi_align(align);

                tcx.intern_layout(LayoutDetails {
                    variants: Variants::Single { index: VariantIdx::new(0) },
                    fields: FieldPlacement::Array {
                        stride: element.size,
                        count
                    },
                    abi: Abi::Vector {
                        element: scalar,
                        count
                    },
                    align,
                    size
                })
            }
            // ADTs.
            ty::Adt(def, substs) => {
                // Cache the field layouts.
                let variants = def.variants.iter().map(|v| {
                    v.fields.iter().map(|field| {
                        self.layout_of(field.ty(tcx, substs))
                    }).collect::<Result<Vec<_>, _>>()
                }).collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
                if def.is_union() {
                    let packed = def.repr.packed();
                    if packed && def.repr.align > 0 {
                        bug!("union cannot be packed and aligned");
                    }

                    let pack = {
                        let pack = def.repr.pack as u64;
                        Align::from_bytes(pack, pack).unwrap()
                    };

                    let mut align = if packed {
                        dl.i8_align
                    } else {
                        dl.aggregate_align
                    };

                    if def.repr.align > 0 {
                        let repr_align = def.repr.align as u64;
                        align = align.max(
                            Align::from_bytes(repr_align, repr_align).unwrap());
                    }
                    let optimize = !def.repr.inhibit_union_abi_opt();
                    let mut size = Size::ZERO;
                    let mut abi = Abi::Aggregate { sized: true };
                    let index = VariantIdx::new(0);
                    for field in &variants[index] {
                        assert!(!field.is_unsized());

                        if packed {
                            let field_pack = field.align.min(pack);
                            align = align.max(field_pack);
                        } else {
                            align = align.max(field.align);
                        }

                        // If all non-ZST fields have the same ABI, forward this ABI.
                        if optimize && !field.is_zst() {
                            // Normalize the scalars' valid ranges to the maximal one.
                            let field_abi = match &field.abi {
                                Abi::Scalar(x) => Abi::Scalar(scalar_unit(x.value)),
                                Abi::ScalarPair(x, y) => {
                                    Abi::ScalarPair(
                                        scalar_unit(x.value),
                                        scalar_unit(y.value),
                                    )
                                }
                                Abi::Vector { element: x, count } => {
                                    Abi::Vector {
                                        element: scalar_unit(x.value),
                                        count: *count,
                                    }
                                }
                                Abi::Uninhabited |
                                Abi::Aggregate { .. } => Abi::Aggregate { sized: true },
                            };

                            if size == Size::ZERO {
                                // First non-ZST field: initialize `abi`.
                                abi = field_abi;
                            } else if abi != field_abi {
                                // Fields have different ABIs: reset to Aggregate.
                                abi = Abi::Aggregate { sized: true };
                            }
                        }

                        size = cmp::max(size, field.size);
                    }

                    return Ok(tcx.intern_layout(LayoutDetails {
                        variants: Variants::Single { index },
                        fields: FieldPlacement::Union(variants[index].len()),
                        abi,
                        align,
                        size: size.abi_align(align)
                    }));
                }
                // A variant is absent if it's uninhabited and only has ZST fields.
                // Present uninhabited variants only require space for their fields,
                // but *not* an encoding of the discriminant (e.g. a tag value).
                // See issue #49298 for more details on the need to leave space
                // for non-ZST uninhabited data (mostly partial initialization).
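                // (E.g. in `enum E { A(u8), B(!) }` the `B` variant is absent,
                // since `!` is an uninhabited ZST; a variant `C(!, u32)` would
                // be uninhabited yet present, keeping space for the `u32`.)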
                let absent = |fields: &[TyLayout<'_>]| {
                    let uninhabited = fields.iter().any(|f| f.abi.is_uninhabited());
                    let is_zst = fields.iter().all(|f| f.is_zst());
                    uninhabited && is_zst
                };
                let (present_first, present_second) = {
                    let mut present_variants = variants.iter_enumerated().filter_map(|(i, v)| {
                        if absent(v) {
                            None
                        } else {
                            Some(i)
                        }
                    });
                    (present_variants.next(), present_variants.next())
                };
                if present_first.is_none() {
                    // Uninhabited because it has no variants, or only absent ones.
                    return tcx.layout_raw(param_env.and(tcx.types.never));
                }
                let is_struct = !def.is_enum() ||
                    // Only one variant is present.
                    (present_second.is_none() &&
                     // Representation optimizations are allowed.
                     !def.repr.inhibit_enum_layout_opt());
                if is_struct {
                    // Struct, or univariant enum equivalent to a struct.
                    // (Typechecking will reject discriminant-sizing attrs.)

                    let v = present_first.unwrap();
                    let kind = if def.is_enum() || variants[v].len() == 0 {
                        StructKind::AlwaysSized
                    } else {
                        let param_env = tcx.param_env(def.did);
                        let last_field = def.variants[v].fields.last().unwrap();
                        let always_sized = tcx.type_of(last_field.did)
                            .is_sized(tcx.at(DUMMY_SP), param_env);
                        if !always_sized { StructKind::MaybeUnsized }
                        else { StructKind::AlwaysSized }
                    };
                    let mut st = univariant_uninterned(&variants[v], &def.repr, kind)?;
                    st.variants = Variants::Single { index: v };
                    let (start, end) = self.tcx.layout_scalar_valid_range(def.did);
                    match st.abi {
                        Abi::Scalar(ref mut scalar) |
                        Abi::ScalarPair(ref mut scalar, _) => {
                            // The asserts ensure that we are not using the
                            // `#[rustc_layout_scalar_valid_range(n)]` attribute
                            // to *widen* the range of anything, as that would
                            // probably result in UB somewhere.
                            if let Bound::Included(start) = start {
                                assert!(*scalar.valid_range.start() <= start);
                                scalar.valid_range = start..=*scalar.valid_range.end();
                            }
                            if let Bound::Included(end) = end {
                                assert!(*scalar.valid_range.end() >= end);
                                scalar.valid_range = *scalar.valid_range.start()..=end;
                            }
                        }
                        _ => assert!(
                            start == Bound::Unbounded && end == Bound::Unbounded,
                            "nonscalar layout for layout_scalar_valid_range type {:?}: {:#?}",
                            def,
                            st,
                        ),
                    }
                    return Ok(tcx.intern_layout(st));
                }
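                // (E.g. `core::num::NonZeroU32` uses
                // `#[rustc_layout_scalar_valid_range_start(1)]` to narrow its
                // scalar's valid range to 1..=u32::MAX.)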
                // The current code for niche-filling relies on variant indices
                // instead of actual discriminants, so dataful enums with
                // explicit discriminants (RFC #2363) would misbehave.
                let no_explicit_discriminants = def.variants.iter_enumerated()
                    .all(|(i, v)| v.discr == ty::VariantDiscr::Relative(i.as_u32()));
                // Niche-filling enum optimization.
                if !def.repr.inhibit_enum_layout_opt() && no_explicit_discriminants {
                    let mut dataful_variant = None;
                    let mut niche_variants = VariantIdx::MAX..=VariantIdx::new(0);

                    // Find one non-ZST variant.
                    'variants: for (v, fields) in variants.iter_enumerated() {
                        if absent(fields) {
                            continue 'variants;
                        }
                        for f in fields {
                            if !f.is_zst() {
                                if dataful_variant.is_none() {
                                    dataful_variant = Some(v);
                                    continue 'variants;
                                } else {
                                    dataful_variant = None;
                                    break 'variants;
                                }
                            }
                        }
                        niche_variants = *niche_variants.start().min(&v)..=v;
                    }

                    if niche_variants.start() > niche_variants.end() {
                        dataful_variant = None;
                    }
                    if let Some(i) = dataful_variant {
                        let count = (
                            niche_variants.end().as_u32() - niche_variants.start().as_u32() + 1
                        ) as u128;
                        for (field_index, &field) in variants[i].iter().enumerate() {
                            let niche = match self.find_niche(field)? {
                                Some(niche) => niche,
                                None => continue,
                            };
                            let (niche_start, niche_scalar) = match niche.reserve(self, count) {
                                Some(pair) => pair,
                                None => continue,
                            };
                            let mut align = dl.aggregate_align;
                            let st = variants.iter_enumerated().map(|(j, v)| {
                                let mut st = univariant_uninterned(v,
                                    &def.repr, StructKind::AlwaysSized)?;
                                st.variants = Variants::Single { index: j };

                                align = align.max(st.align);

                                Ok(st)
                            }).collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
                            let offset = st[i].fields.offset(field_index) + niche.offset;
                            let size = st[i].size;

                            let mut abi = match st[i].abi {
                                Abi::Scalar(_) => Abi::Scalar(niche_scalar.clone()),
                                Abi::ScalarPair(ref first, ref second) => {
                                    // We need to use scalar_unit to reset the
                                    // valid range to the maximal one for that
                                    // primitive, because only the niche is
                                    // guaranteed to be initialised, not the
                                    // other primitive.
                                    if offset.bytes() == 0 {
                                        Abi::ScalarPair(
                                            niche_scalar.clone(),
                                            scalar_unit(second.value),
                                        )
                                    } else {
                                        Abi::ScalarPair(
                                            scalar_unit(first.value),
                                            niche_scalar.clone(),
                                        )
                                    }
                                }
                                _ => Abi::Aggregate { sized: true },
                            };
                            if st.iter().all(|v| v.abi.is_uninhabited()) {
                                abi = Abi::Uninhabited;
                            }
                            return Ok(tcx.intern_layout(LayoutDetails {
                                variants: Variants::NicheFilling {
                                    dataful_variant: i,
                                    niche_variants,
                                    niche: niche_scalar,
                                    niche_start,
                                    variants: st,
                                },
                                fields: FieldPlacement::Arbitrary {
                                    offsets: vec![offset],
                                    memory_index: vec![0]
                                },
                                abi,
                                size,
                                align,
                            }));
                        }
                    }
                }
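                // (Niche-filling example: in `Option<&u8>` the payload's pointer
                // has valid range 1..=usize::MAX, so the niche value 0 encodes
                // `None` and the whole enum stays pointer-sized.)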
                let (mut min, mut max) = (i128::max_value(), i128::min_value());
                let discr_type = def.repr.discr_type();
                let bits = Integer::from_attr(self, discr_type).size().bits();
                for (i, discr) in def.discriminants(tcx) {
                    if variants[i].iter().any(|f| f.abi.is_uninhabited()) {
                        continue;
                    }
                    let mut x = discr.val as i128;
                    if discr_type.is_signed() {
                        // sign extend the raw representation to be an i128
                        x = (x << (128 - bits)) >> (128 - bits);
                    }
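                    // (E.g. under `repr(i8)`, the raw value 0xff sign-extends
                    // to -1: (0xff << 120) >> 120 == -1 as an i128.)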
                    if x < min { min = x; }
                    if x > max { max = x; }
                }
                // We might have no inhabited variants, so pretend there's at least one.
                if (min, max) == (i128::max_value(), i128::min_value()) {
                    min = 0;
                    max = 0;
                }
                assert!(min <= max, "discriminant range is {}...{}", min, max);
                let (min_ity, signed) = Integer::repr_discr(tcx, ty, &def.repr, min, max);
                let mut align = dl.aggregate_align;
                let mut size = Size::ZERO;

                // We're interested in the smallest alignment, so start large.
                let mut start_align = Align::from_bytes(256, 256).unwrap();
                assert_eq!(Integer::for_abi_align(dl, start_align), None);
                // repr(C) on an enum tells us to make a (tag, union) layout,
                // so we need to grow the prefix alignment to be at least
                // the alignment of the union. (This value is used both for
                // determining the alignment of the overall enum, and for
                // determining the alignment of the payload after the tag.)
                let mut prefix_align = min_ity.align(dl);
                if def.repr.c() {
                    for fields in &variants {
                        for field in fields {
                            prefix_align = prefix_align.max(field.align);
                        }
                    }
                }
                // Create the set of structs that represent each variant.
                let mut layout_variants = variants.iter_enumerated().map(|(i, field_layouts)| {
                    let mut st = univariant_uninterned(&field_layouts,
                        &def.repr, StructKind::Prefixed(min_ity.size(), prefix_align))?;
                    st.variants = Variants::Single { index: i };
                    // Find the first field we can't move later
                    // to make room for a larger discriminant.
                    for field in st.fields.index_by_increasing_offset().map(|j| field_layouts[j]) {
                        if !field.is_zst() || field.align.abi() != 1 {
                            start_align = start_align.min(field.align);
                            break;
                        }
                    }
                    size = cmp::max(size, st.size);
                    align = align.max(st.align);
                    Ok(st)
                }).collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
                // Align the maximum variant size to the largest alignment.
                size = size.abi_align(align);

                if size.bytes() >= dl.obj_size_bound() {
                    return Err(LayoutError::SizeOverflow(ty));
                }
                let typeck_ity = Integer::from_attr(dl, def.repr.discr_type());
                if typeck_ity < min_ity {
                    // It is a bug if layout decided on a greater discriminant size than
                    // typeck at this point (based on the values the discriminant can
                    // take on), because this discriminant will be loaded, and then
                    // stored into a variable of the type calculated by typeck. Consider
                    // such a case (a bug): typeck decided on a byte-sized discriminant,
                    // but layout thinks we need 16 bits to store all discriminant
                    // values. Then, in codegen, storing this 16-bit discriminant into
                    // an 8-bit sized temporary would have to discard some of the space
                    // necessary to represent it (or layout is wrong in thinking it
                    // needs 16 bits).
                    bug!("layout decided on a larger discriminant type ({:?}) than typeck ({:?})",
                         min_ity, typeck_ity);
                    // However, it is fine to make the discriminant type however large
                    // (as an optimisation) after this point - we'll just truncate the
                    // value we load in codegen.
                }
                // Check to see if we should use a different type for the
                // discriminant. We can safely use a type with the same size
                // as the alignment of the first field of each variant.
                // We increase the size of the discriminant to avoid LLVM copying
                // padding when it doesn't need to. This normally causes unaligned
                // load/stores and excessive memcpy/memset operations. By using a
                // bigger integer size, LLVM can be sure about its contents and
                // won't be so conservative.

                // Use the initial field alignment
                let mut ity = if def.repr.c() || def.repr.int.is_some() {
                    min_ity
                } else {
                    Integer::for_abi_align(dl, start_align).unwrap_or(min_ity)
                };

                // If the alignment is not larger than the chosen discriminant size,
                // don't use the alignment as the final size.
                if ity <= min_ity {
                    ity = min_ity;
                } else {
                    // Patch up the variants' first few fields.
                    let old_ity_size = min_ity.size();
                    let new_ity_size = ity.size();
                    for variant in &mut layout_variants {
                        match variant.fields {
                            FieldPlacement::Arbitrary { ref mut offsets, .. } => {
                                for i in offsets {
                                    if *i <= old_ity_size {
                                        assert_eq!(*i, old_ity_size);
                                        *i = new_ity_size;
                                    }
                                }
                                // We might be making the struct larger.
                                if variant.size <= old_ity_size {
                                    variant.size = new_ity_size;
                                }
                            }
                            _ => bug!()
                        }
                    }
                }
                let tag_mask = !0u128 >> (128 - ity.size().bits());
                let tag = Scalar {
                    value: Int(ity, signed),
                    valid_range: (min as u128 & tag_mask)..=(max as u128 & tag_mask),
                };
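                // (E.g. an i8 discriminant range of -1..=0 is stored wrapped:
                // valid_range == 255..=0, a wrap-around range.)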
                let mut abi = Abi::Aggregate { sized: true };
                if tag.value.size(dl) == size {
                    abi = Abi::Scalar(tag.clone());
                } else {
                    // Try to use a ScalarPair for all tagged enums.
                    let mut common_prim = None;
                    for (field_layouts, layout_variant) in variants.iter().zip(&layout_variants) {
                        let offsets = match layout_variant.fields {
                            FieldPlacement::Arbitrary { ref offsets, .. } => offsets,
                            _ => bug!(),
                        };
                        let mut fields = field_layouts
                            .iter()
                            .zip(offsets)
                            .filter(|p| !p.0.is_zst());
                        let (field, offset) = match (fields.next(), fields.next()) {
                            (None, None) => continue,
                            (Some(pair), None) => pair,
                            _ => {
                                common_prim = None;
                                break;
                            }
                        };
                        let prim = match field.details.abi {
                            Abi::Scalar(ref scalar) => scalar.value,
                            _ => {
                                common_prim = None;
                                break;
                            }
                        };
                        if let Some(pair) = common_prim {
                            // This is pretty conservative. We could go fancier
                            // by conflating things like i32 and u32, or even
                            // realising that (u8, u8) could just cohabit with
                            // u16 or even u32.
                            if pair != (prim, offset) {
                                common_prim = None;
                                break;
                            }
                        } else {
                            common_prim = Some((prim, offset));
                        }
                    }
                    if let Some((prim, offset)) = common_prim {
                        let pair = scalar_pair(tag.clone(), scalar_unit(prim));
                        let pair_offsets = match pair.fields {
                            FieldPlacement::Arbitrary {
                                ref offsets,
                                ref memory_index
                            } => {
                                assert_eq!(memory_index, &[0, 1]);
                                offsets
                            }
                            _ => bug!()
                        };
                        if pair_offsets[0] == Size::ZERO &&
                           pair_offsets[1] == *offset &&
                           align == pair.align &&
                           size == pair.size {
                            // We can use `ScalarPair` only when it matches our
                            // already computed layout (including `#[repr(C)]`).
                            abi = pair.abi.clone();
                        }
                    }
                }
                if layout_variants.iter().all(|v| v.abi.is_uninhabited()) {
                    abi = Abi::Uninhabited;
                }

                tcx.intern_layout(LayoutDetails {
                    variants: Variants::Tagged {
                        tag,
                        variants: layout_variants,
                    },
                    fields: FieldPlacement::Arbitrary {
                        offsets: vec![Size::ZERO],
                        memory_index: vec![0]
                    },
                    abi,
                    align,
                    size
                })
            }
            // Types with no meaningful known layout.
            ty::Projection(_) | ty::Opaque(..) => {
                let normalized = tcx.normalize_erasing_regions(param_env, ty);
                if ty == normalized {
                    return Err(LayoutError::Unknown(ty));
                }
                tcx.layout_raw(param_env.and(normalized))?
            }

            ty::Bound(..) |
            ty::UnnormalizedProjection(..) |
            ty::GeneratorWitness(..) |
            ty::Infer(_) => {
                bug!("LayoutDetails::compute: unexpected type `{}`", ty)
            }

            ty::Param(_) | ty::Error => {
                return Err(LayoutError::Unknown(ty));
            }
        })
    }
    /// This is invoked by the `layout_raw` query to record the final
    /// layout of each type.
    #[inline]
    fn record_layout_for_printing(&self, layout: TyLayout<'tcx>) {
        // If we are running with `-Zprint-type-sizes`, record layouts for
        // dumping later. Ignore layouts that are done with non-empty
        // environments or non-monomorphic layouts, as the user only wants
        // to see the stuff resulting from the final codegen session.
        if !self.tcx.sess.opts.debugging_opts.print_type_sizes ||
           layout.ty.has_param_types() ||
           layout.ty.has_self_ty() ||
           !self.param_env.caller_bounds.is_empty()
        {
            return;
        }

        self.record_layout_for_printing_outlined(layout)
    }
    fn record_layout_for_printing_outlined(&self, layout: TyLayout<'tcx>) {
        // (delay format until we actually need it)
        let record = |kind, packed, opt_discr_size, variants| {
            let type_desc = format!("{:?}", layout.ty);
            self.tcx.sess.code_stats.borrow_mut().record_type_size(kind,
                                                                   type_desc,
                                                                   layout.align,
                                                                   layout.size,
                                                                   packed,
                                                                   opt_discr_size,
                                                                   variants);
        };
        let adt_def = match layout.ty.sty {
            ty::Adt(ref adt_def, _) => {
                debug!("print-type-size t: `{:?}` process adt", layout.ty);
                adt_def
            }

            ty::Closure(..) => {
                debug!("print-type-size t: `{:?}` record closure", layout.ty);
                record(DataTypeKind::Closure, false, None, vec![]);
                return;
            }

            _ => {
                debug!("print-type-size t: `{:?}` skip non-nominal", layout.ty);
                return;
            }
        };
        let adt_kind = adt_def.adt_kind();
        let adt_packed = adt_def.repr.packed();
        let build_variant_info = |n: Option<ast::Name>,
                                  flds: &[ast::Name],
                                  layout: TyLayout<'tcx>| {
            let mut min_size = Size::ZERO;
            let field_info: Vec<_> = flds.iter().enumerate().map(|(i, &name)| {
                match layout.field(self, i) {
                    Err(err) => {
                        bug!("no layout found for field {}: `{:?}`", name, err);
                    }
                    Ok(field_layout) => {
                        let offset = layout.fields.offset(i);
                        let field_end = offset + field_layout.size;
                        if min_size < field_end {
                            min_size = field_end;
                        }
                        session::FieldInfo {
                            name: name.to_string(),
                            offset: offset.bytes(),
                            size: field_layout.size.bytes(),
                            align: field_layout.align.abi(),
                        }
                    }
                }
            }).collect();

            session::VariantInfo {
                name: n.map(|n| n.to_string()),
                kind: if layout.is_unsized() {
                    session::SizeKind::Min
                } else {
                    session::SizeKind::Exact
                },
                align: layout.align.abi(),
                size: if min_size.bytes() == 0 {
                    layout.size.bytes()
                } else {
                    min_size.bytes()
                },
                fields: field_info,
            }
        };
        match layout.variants {
            Variants::Single { index } => {
                debug!("print-type-size `{:#?}` variant {}",
                       layout, adt_def.variants[index].name);
                if !adt_def.variants.is_empty() {
                    let variant_def = &adt_def.variants[index];
                    let fields: Vec<_> =
                        variant_def.fields.iter().map(|f| f.ident.name).collect();
                    record(adt_kind.into(),
                           adt_packed,
                           None,
                           vec![build_variant_info(Some(variant_def.name),
                                                   &fields,
                                                   layout)]);
                } else {
                    // (This case arises for *empty* enums; so give it
                    // zero variants.)
                    record(adt_kind.into(), adt_packed, None, vec![]);
                }
            }
            Variants::NicheFilling { .. } |
            Variants::Tagged { .. } => {
                debug!("print-type-size `{:#?}` adt general variants def {}",
                       layout.ty, adt_def.variants.len());
                let variant_infos: Vec<_> =
                    adt_def.variants.iter_enumerated().map(|(i, variant_def)| {
                        let fields: Vec<_> =
                            variant_def.fields.iter().map(|f| f.ident.name).collect();
                        build_variant_info(Some(variant_def.name),
                                           &fields,
                                           layout.for_variant(self, i))
                    })
                    .collect();
                record(adt_kind.into(), adt_packed, match layout.variants {
                    Variants::Tagged { ref tag, .. } => Some(tag.value.size(self)),
                    _ => None
                }, variant_infos);
            }
        }
    }
}
/// Type size "skeleton", i.e. the only information determining a type's size.
/// While this is conservative (aside from constant sizes, only pointers,
/// newtypes thereof and null-pointer-optimized enums are allowed), it is
/// enough to statically check common use cases of transmute.
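/// (E.g. `transmute::<&T, Option<&T>>` can be checked even for a generic `T`,
/// since both sides reduce to a pointer skeleton with the same tail.)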
#[derive(Copy, Clone, Debug)]
pub enum SizeSkeleton<'tcx> {
    /// Any statically computable Layout.
    Known(Size),

    /// A potentially-fat pointer.
    Pointer {
        /// If true, this pointer is never null.
        non_zero: bool,
        /// The type which determines the unsized metadata, if any,
        /// of this pointer. Either a type parameter or a projection
        /// depending on one, with regions erased.
        tail: Ty<'tcx>
    }
}
impl<'a, 'tcx> SizeSkeleton<'tcx> {
    pub fn compute(ty: Ty<'tcx>,
                   tcx: TyCtxt<'a, 'tcx, 'tcx>,
                   param_env: ty::ParamEnv<'tcx>)
                   -> Result<SizeSkeleton<'tcx>, LayoutError<'tcx>> {
        debug_assert!(!ty.has_infer_types());

        // First try computing a static layout.
        let err = match tcx.layout_of(param_env.and(ty)) {
            Ok(layout) => {
                return Ok(SizeSkeleton::Known(layout.size));
            }
            Err(err) => err
        };

        match ty.sty {
            ty::Ref(_, pointee, _) |
            ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
                let non_zero = !ty.is_unsafe_ptr();
                let tail = tcx.struct_tail(pointee);
                match tail.sty {
                    ty::Param(_) | ty::Projection(_) => {
                        debug_assert!(tail.has_param_types() || tail.has_self_ty());
                        Ok(SizeSkeleton::Pointer {
                            non_zero,
                            tail: tcx.erase_regions(&tail)
                        })
                    }
                    _ => {
                        bug!("SizeSkeleton::compute({}): layout errored ({}), yet \
                              tail `{}` is not a type parameter or a projection",
                             ty, err, tail)
                    }
                }
            }
            ty::Adt(def, substs) => {
                // Only newtypes and enums w/ nullable pointer optimization.
                if def.is_union() || def.variants.is_empty() || def.variants.len() > 2 {
                    return Err(err);
                }

                // Get a zero-sized variant or a pointer newtype.
                let zero_or_ptr_variant = |i| {
                    let i = VariantIdx::new(i);
                    let fields = def.variants[i].fields.iter().map(|field| {
                        SizeSkeleton::compute(field.ty(tcx, substs), tcx, param_env)
                    });
                    let mut ptr = None;
                    for field in fields {
                        let field = field?;
                        match field {
                            SizeSkeleton::Known(size) => {
                                if size.bytes() > 0 {
                                    return Err(err);
                                }
                            }
                            SizeSkeleton::Pointer {..} => {
                                if ptr.is_some() {
                                    return Err(err);
                                }
                                ptr = Some(field);
                            }
                        }
                    }
                    Ok(ptr)
                };
                let v0 = zero_or_ptr_variant(0)?;
                // Newtype.
                if def.variants.len() == 1 {
                    if let Some(SizeSkeleton::Pointer { non_zero, tail }) = v0 {
                        return Ok(SizeSkeleton::Pointer {
                            non_zero: non_zero || match tcx.layout_scalar_valid_range(def.did) {
                                (Bound::Included(start), Bound::Unbounded) => start > 0,
                                (Bound::Included(start), Bound::Included(end)) =>
                                    0 < start && start < end,
                                _ => false,
                            },
                            tail,
                        });
                    } else {
                        return Err(err);
                    }
                }

                let v1 = zero_or_ptr_variant(1)?;
                // Nullable pointer enum optimization.
                match (v0, v1) {
                    (Some(SizeSkeleton::Pointer { non_zero: true, tail }), None) |
                    (None, Some(SizeSkeleton::Pointer { non_zero: true, tail })) => {
                        Ok(SizeSkeleton::Pointer {
                            non_zero: false,
                            tail,
                        })
                    }
                    _ => Err(err)
                }
            }

            ty::Projection(_) | ty::Opaque(..) => {
                let normalized = tcx.normalize_erasing_regions(param_env, ty);
                if ty == normalized {
                    Err(err)
                } else {
                    SizeSkeleton::compute(normalized, tcx, param_env)
                }
            }

            _ => Err(err)
        }
    }
    pub fn same_size(self, other: SizeSkeleton<'_>) -> bool {
        match (self, other) {
            (SizeSkeleton::Known(a), SizeSkeleton::Known(b)) => a == b,
            (SizeSkeleton::Pointer { tail: a, .. },
             SizeSkeleton::Pointer { tail: b, .. }) => a == b,
            _ => false
        }
    }
}
pub trait HasTyCtxt<'tcx>: HasDataLayout {
    fn tcx<'a>(&'a self) -> TyCtxt<'a, 'tcx, 'tcx>;
}

impl<'a, 'gcx, 'tcx> HasDataLayout for TyCtxt<'a, 'gcx, 'tcx> {
    fn data_layout(&self) -> &TargetDataLayout {
        &self.data_layout
    }
}

impl<'a, 'gcx, 'tcx> HasTyCtxt<'gcx> for TyCtxt<'a, 'gcx, 'tcx> {
    fn tcx<'b>(&'b self) -> TyCtxt<'b, 'gcx, 'gcx> {
        self.global_tcx()
    }
}

impl<'tcx, T: HasDataLayout> HasDataLayout for LayoutCx<'tcx, T> {
    fn data_layout(&self) -> &TargetDataLayout {
        self.tcx.data_layout()
    }
}

impl<'gcx, 'tcx, T: HasTyCtxt<'gcx>> HasTyCtxt<'gcx> for LayoutCx<'tcx, T> {
    fn tcx<'b>(&'b self) -> TyCtxt<'b, 'gcx, 'gcx> {
        self.tcx.tcx()
    }
}
pub trait MaybeResult<T> {
    fn from_ok(x: T) -> Self;
    fn map_same<F: FnOnce(T) -> T>(self, f: F) -> Self;
}

impl<T> MaybeResult<T> for T {
    fn from_ok(x: T) -> Self {
        x
    }
    fn map_same<F: FnOnce(T) -> T>(self, f: F) -> Self {
        f(self)
    }
}

impl<T, E> MaybeResult<T> for Result<T, E> {
    fn from_ok(x: T) -> Self {
        Ok(x)
    }
    fn map_same<F: FnOnce(T) -> T>(self, f: F) -> Self {
        self.map(f)
    }
}
pub type TyLayout<'tcx> = ::rustc_target::abi::TyLayout<'tcx, Ty<'tcx>>;
impl<'a, 'tcx> LayoutOf for LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> {
    type Ty = Ty<'tcx>;
    type TyLayout = Result<TyLayout<'tcx>, LayoutError<'tcx>>;

    /// Computes the layout of a type. Note that this implicitly
    /// executes in "reveal all" mode.
    fn layout_of(&self, ty: Ty<'tcx>) -> Self::TyLayout {
        let param_env = self.param_env.with_reveal_all();
        let ty = self.tcx.normalize_erasing_regions(param_env, ty);
        let details = self.tcx.layout_raw(param_env.and(ty))?;
        let layout = TyLayout {
            ty,
            details
        };

        // NB: This recording is normally disabled; when enabled, it
        // can however trigger recursive invocations of `layout_of`.
        // Therefore, we execute it *after* the main query has
        // completed, to avoid problems around recursive structures
        // and the like. (Admittedly, I wasn't able to reproduce a problem
        // here, but it seems like the right thing to do. -nmatsakis)
        self.record_layout_for_printing(layout);

        Ok(layout)
    }
}
impl<'a, 'tcx> LayoutOf for LayoutCx<'tcx, ty::query::TyCtxtAt<'a, 'tcx, 'tcx>> {
    type Ty = Ty<'tcx>;
    type TyLayout = Result<TyLayout<'tcx>, LayoutError<'tcx>>;

    /// Computes the layout of a type. Note that this implicitly
    /// executes in "reveal all" mode.
    fn layout_of(&self, ty: Ty<'tcx>) -> Self::TyLayout {
        let param_env = self.param_env.with_reveal_all();
        let ty = self.tcx.normalize_erasing_regions(param_env, ty);
        let details = self.tcx.layout_raw(param_env.and(ty))?;
        let layout = TyLayout {
            ty,
            details
        };

        // NB: This recording is normally disabled; when enabled, it
        // can however trigger recursive invocations of `layout_of`.
        // Therefore, we execute it *after* the main query has
        // completed, to avoid problems around recursive structures
        // and the like. (Admittedly, I wasn't able to reproduce a problem
        // here, but it seems like the right thing to do. -nmatsakis)
        let cx = LayoutCx {
            tcx: *self.tcx,
            param_env: self.param_env
        };
        cx.record_layout_for_printing(layout);

        Ok(layout)
    }
}
// Helper (inherent) `layout_of` methods to avoid pushing `LayoutCx` to users.
impl TyCtxt<'a, 'tcx, '_> {
    /// Computes the layout of a type. Note that this implicitly
    /// executes in "reveal all" mode.
    #[inline]
    pub fn layout_of(self, param_env_and_ty: ty::ParamEnvAnd<'tcx, Ty<'tcx>>)
                     -> Result<TyLayout<'tcx>, LayoutError<'tcx>> {
        let cx = LayoutCx {
            tcx: self.global_tcx(),
            param_env: param_env_and_ty.param_env
        };
        cx.layout_of(param_env_and_ty.value)
    }
}
impl ty::query::TyCtxtAt<'a, 'tcx, '_> {
    /// Computes the layout of a type. Note that this implicitly
    /// executes in "reveal all" mode.
    #[inline]
    pub fn layout_of(self, param_env_and_ty: ty::ParamEnvAnd<'tcx, Ty<'tcx>>)
                     -> Result<TyLayout<'tcx>, LayoutError<'tcx>> {
        let cx = LayoutCx {
            tcx: self.global_tcx().at(self.span),
            param_env: param_env_and_ty.param_env
        };
        cx.layout_of(param_env_and_ty.value)
    }
}
impl<'a, 'tcx, C> TyLayoutMethods<'tcx, C> for Ty<'tcx>
    where C: LayoutOf<Ty = Ty<'tcx>> + HasTyCtxt<'tcx>,
          C::TyLayout: MaybeResult<TyLayout<'tcx>>
{
    fn for_variant(this: TyLayout<'tcx>, cx: &C, variant_index: VariantIdx) -> TyLayout<'tcx> {
        let details = match this.variants {
            Variants::Single { index } if index == variant_index => this.details,

            Variants::Single { index } => {
                // Deny calling for_variant more than once for non-Single enums.
                cx.layout_of(this.ty).map_same(|layout| {
                    assert_eq!(layout.variants, Variants::Single { index });
                    layout
                });

                let fields = match this.ty.sty {
                    ty::Adt(def, _) => def.variants[variant_index].fields.len(),
                    _ => bug!()
                };
                let tcx = cx.tcx();
                tcx.intern_layout(LayoutDetails {
                    variants: Variants::Single { index: variant_index },
                    fields: FieldPlacement::Union(fields),
                    abi: Abi::Uninhabited,
                    align: tcx.data_layout.i8_align,
                    size: Size::ZERO
                })
            }

            Variants::NicheFilling { ref variants, .. } |
            Variants::Tagged { ref variants, .. } => {
                &variants[variant_index]
            }
        };

        assert_eq!(details.variants, Variants::Single { index: variant_index });

        TyLayout {
            ty: this.ty,
            details
        }
    }
    fn field(this: TyLayout<'tcx>, cx: &C, i: usize) -> C::TyLayout {
        let tcx = cx.tcx();
        cx.layout_of(match this.ty.sty {
            ty::Bool |
            ty::Char |
            ty::Int(_) |
            ty::Uint(_) |
            ty::Float(_) |
            ty::FnPtr(_) |
            ty::Never |
            ty::FnDef(..) |
            ty::GeneratorWitness(..) |
            ty::Foreign(..) |
            ty::Dynamic(..) => {
                bug!("TyLayout::field_type({:?}): not applicable", this)
            }
            // Potentially-fat pointers.
            ty::Ref(_, pointee, _) |
            ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
                assert!(i < this.fields.count());

                // Reuse the fat *T type as its own thin pointer data field.
                // This provides information about e.g. DST struct pointees
                // (which may have no non-DST form), and will work as long
                // as the `Abi` or `FieldPlacement` is checked by users.
                if i == 0 {
                    let nil = tcx.mk_unit();
                    let ptr_ty = if this.ty.is_unsafe_ptr() {
                        tcx.mk_mut_ptr(nil)
                    } else {
                        tcx.mk_mut_ref(tcx.types.re_static, nil)
                    };
                    return cx.layout_of(ptr_ty).map_same(|mut ptr_layout| {
                        ptr_layout.ty = this.ty;
                        ptr_layout
                    });
                }
                match tcx.struct_tail(pointee).sty {
                    ty::Slice(_) |
                    ty::Str => tcx.types.usize,
                    ty::Dynamic(_, _) => {
                        tcx.mk_imm_ref(
                            tcx.types.re_static,
                            tcx.mk_array(tcx.types.usize, 3),
                        )
                        /* FIXME use actual fn pointers
                        Warning: naively computing the number of entries in the
                        vtable by counting the methods on the trait + methods on
                        all parent traits does not work, because some methods can
                        be not object safe and thus excluded from the vtable.
                        Increase this counter if you tried to implement this but
                        failed to do it without duplicating a lot of code from
                        other places in the compiler: 2
                        tcx.mk_tup(&[
                            tcx.mk_array(tcx.types.usize, 3),
                            tcx.mk_array(Option<fn()>),
                        ])
                        */
                    }
                    _ => bug!("TyLayout::field_type({:?}): not applicable", this)
                }
            // Arrays and slices.
            ty::Array(element, _) |
            ty::Slice(element) => element,
            ty::Str => tcx.types.u8,

            // Tuples, generators and closures.
            ty::Closure(def_id, ref substs) => {
                substs.upvar_tys(def_id, tcx).nth(i).unwrap()
            }

            ty::Generator(def_id, ref substs, _) => {
                substs.field_tys(def_id, tcx).nth(i).unwrap()
            }

            ty::Tuple(tys) => tys[i],

            // SIMD vector types.
            ty::Adt(def, ..) if def.repr.simd() => {
                this.ty.simd_type(tcx)
            }
            // ADT types.
            ty::Adt(def, substs) => {
                match this.variants {
                    Variants::Single { index } => {
                        def.variants[index].fields[i].ty(tcx, substs)
                    }

                    // Discriminant field for enums (where applicable).
                    Variants::Tagged { tag: ref discr, .. } |
                    Variants::NicheFilling { niche: ref discr, .. } => {
                        assert_eq!(i, 0);
                        let layout = LayoutDetails::scalar(cx, discr.clone());
                        return MaybeResult::from_ok(TyLayout {
                            details: tcx.intern_layout(layout),
                            ty: discr.value.to_ty(tcx)
                        });
                    }
                }
            }

            ty::Projection(_) | ty::UnnormalizedProjection(..) | ty::Bound(..) |
            ty::Opaque(..) | ty::Param(_) | ty::Infer(_) | ty::Error => {
                bug!("TyLayout::field_type: unexpected type `{}`", this.ty)
            }
        })
    }
}
struct Niche {
    offset: Size,
    scalar: Scalar,
    available: u128,
}

impl Niche {
    fn reserve<'a, 'tcx>(
        &self,
        cx: &LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>>,
        count: u128,
    ) -> Option<(u128, Scalar)> {
        if count > self.available {
            return None;
        }
        let Scalar { value, valid_range: ref v } = self.scalar;
        let bits = value.size(cx).bits();
        assert!(bits <= 128);
        let max_value = !0u128 >> (128 - bits);
        let start = v.end().wrapping_add(1) & max_value;
        let end = v.end().wrapping_add(count) & max_value;
        Some((start, Scalar { value, valid_range: *v.start()..=end }))
    }
}
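// (E.g. reserving one niche in a `bool`'s 0..=1 range yields start == 2 and
// widens the scalar's valid range to 0..=2.)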
impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> {
    /// Find the offset of a niche leaf field, starting from
    /// the given type and recursing through aggregates.
    // FIXME(eddyb) traverse already optimized enums.
    fn find_niche(&self, layout: TyLayout<'tcx>) -> Result<Option<Niche>, LayoutError<'tcx>> {
        let scalar_niche = |scalar: &Scalar, offset| {
            let Scalar { value, valid_range: ref v } = *scalar;

            let bits = value.size(self).bits();
            assert!(bits <= 128);
            let max_value = !0u128 >> (128 - bits);

            // Find out how many values are outside the valid range.
            let available = if v.start() <= v.end() {
                v.start() + (max_value - v.end())
            } else {
                v.start() - v.end() - 1
            };

            // Give up if there is no niche value available.
            if available == 0 {
                return None;
            }

            Some(Niche { offset, scalar: scalar.clone(), available })
        };
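        // (E.g. a `bool` scalar valid over 0..=1 in 8 bits has
        // available == 0 + (255 - 1) == 254 niche values.)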
        // Local variables which live across yields are stored
        // in the generator type as fields. These may be uninitialized,
        // so we don't look for niches there.
        if let ty::Generator(..) = layout.ty.sty {
            return Ok(None);
        }

        match layout.abi {
            Abi::Scalar(ref scalar) => {
                return Ok(scalar_niche(scalar, Size::ZERO));
            }
            Abi::ScalarPair(ref a, ref b) => {
                // HACK(nox): We iter on `b` and then `a` because `max_by_key`
                // returns the last maximum.
                let niche = iter::once((b, a.value.size(self).abi_align(b.value.align(self))))
                    .chain(iter::once((a, Size::ZERO)))
                    .filter_map(|(scalar, offset)| scalar_niche(scalar, offset))
                    .max_by_key(|niche| niche.available);
                return Ok(niche);
            }
            Abi::Vector { ref element, .. } => {
                return Ok(scalar_niche(element, Size::ZERO));
            }
            _ => {}
        }
        // Perhaps one of the fields is non-zero, let's recurse and find out.
        if let FieldPlacement::Union(_) = layout.fields {
            // Only Rust enums have safe-to-inspect fields
            // (a discriminant), other unions are unsafe.
            if let Variants::Single { .. } = layout.variants {
                return Ok(None);
            }
        }
        if let FieldPlacement::Array { .. } = layout.fields {
            if layout.fields.count() > 0 {
                return self.find_niche(layout.field(self, 0)?);
            } else {
                return Ok(None);
            }
        }
        let mut niche = None;
        let mut available = 0;
        for i in 0..layout.fields.count() {
            if let Some(mut c) = self.find_niche(layout.field(self, i)?)? {
                if c.available > available {
                    available = c.available;
                    c.offset += layout.fields.offset(i);
                    niche = Some(c);
                }
            }
        }
        Ok(niche)
    }
}
impl<'a> HashStable<StableHashingContext<'a>> for Variants {
    fn hash_stable<W: StableHasherResult>(&self,
                                          hcx: &mut StableHashingContext<'a>,
                                          hasher: &mut StableHasher<W>) {
        use ty::layout::Variants::*;
        mem::discriminant(self).hash_stable(hcx, hasher);

        match *self {
            Single { index } => {
                index.hash_stable(hcx, hasher);
            }
            Tagged {
                ref tag,
                ref variants,
            } => {
                tag.hash_stable(hcx, hasher);
                variants.hash_stable(hcx, hasher);
            }
            NicheFilling {
                dataful_variant,
                ref niche_variants,
                ref niche,
                niche_start,
                ref variants,
            } => {
                dataful_variant.hash_stable(hcx, hasher);
                niche_variants.start().hash_stable(hcx, hasher);
                niche_variants.end().hash_stable(hcx, hasher);
                niche.hash_stable(hcx, hasher);
                niche_start.hash_stable(hcx, hasher);
                variants.hash_stable(hcx, hasher);
            }
        }
    }
}
impl<'a> HashStable<StableHashingContext<'a>> for FieldPlacement {
    fn hash_stable<W: StableHasherResult>(&self,
                                          hcx: &mut StableHashingContext<'a>,
                                          hasher: &mut StableHasher<W>) {
        use ty::layout::FieldPlacement::*;
        mem::discriminant(self).hash_stable(hcx, hasher);

        match *self {
            Union(count) => {
                count.hash_stable(hcx, hasher);
            }
            Array { count, stride } => {
                count.hash_stable(hcx, hasher);
                stride.hash_stable(hcx, hasher);
            }
            Arbitrary { ref offsets, ref memory_index } => {
                offsets.hash_stable(hcx, hasher);
                memory_index.hash_stable(hcx, hasher);
            }
        }
    }
}
impl<'a> HashStable<StableHashingContext<'a>> for VariantIdx {
    fn hash_stable<W: StableHasherResult>(
        &self,
        hcx: &mut StableHashingContext<'a>,
        hasher: &mut StableHasher<W>,
    ) {
        self.as_u32().hash_stable(hcx, hasher)
    }
}
impl<'a> HashStable<StableHashingContext<'a>> for Abi {
    fn hash_stable<W: StableHasherResult>(&self,
                                          hcx: &mut StableHashingContext<'a>,
                                          hasher: &mut StableHasher<W>) {
        use ty::layout::Abi::*;
        mem::discriminant(self).hash_stable(hcx, hasher);

        match *self {
            Uninhabited => {}
            Scalar(ref value) => {
                value.hash_stable(hcx, hasher);
            }
            ScalarPair(ref a, ref b) => {
                a.hash_stable(hcx, hasher);
                b.hash_stable(hcx, hasher);
            }
            Vector { ref element, count } => {
                element.hash_stable(hcx, hasher);
                count.hash_stable(hcx, hasher);
            }
            Aggregate { sized } => {
                sized.hash_stable(hcx, hasher);
            }
        }
    }
}
impl<'a> HashStable<StableHashingContext<'a>> for Scalar {
    fn hash_stable<W: StableHasherResult>(&self,
                                          hcx: &mut StableHashingContext<'a>,
                                          hasher: &mut StableHasher<W>) {
        let Scalar { value, ref valid_range } = *self;
        value.hash_stable(hcx, hasher);
        valid_range.start().hash_stable(hcx, hasher);
        valid_range.end().hash_stable(hcx, hasher);
    }
}
impl_stable_hash_for!(struct ::ty::layout::LayoutDetails {
    variants,
    fields,
    abi,
    size,
    align
});
impl_stable_hash_for!(enum ::ty::layout::Integer {
    I8,
    I16,
    I32,
    I64,
    I128
});
impl_stable_hash_for!(enum ::ty::layout::Primitive {
    Int(integer, signed),
    Float(fty),
    Pointer
});
impl<'gcx> HashStable<StableHashingContext<'gcx>> for Align {
    fn hash_stable<W: StableHasherResult>(&self,
                                          hcx: &mut StableHashingContext<'gcx>,
                                          hasher: &mut StableHasher<W>) {
        self.abi().hash_stable(hcx, hasher);
        self.pref().hash_stable(hcx, hasher);
    }
}
impl<'gcx> HashStable<StableHashingContext<'gcx>> for Size {
    fn hash_stable<W: StableHasherResult>(&self,
                                          hcx: &mut StableHashingContext<'gcx>,
                                          hasher: &mut StableHasher<W>) {
        self.bytes().hash_stable(hcx, hasher);
    }
}
impl<'a, 'gcx> HashStable<StableHashingContext<'a>> for LayoutError<'gcx>
{
    fn hash_stable<W: StableHasherResult>(&self,
                                          hcx: &mut StableHashingContext<'a>,
                                          hasher: &mut StableHasher<W>) {
        use ty::layout::LayoutError::*;
        mem::discriminant(self).hash_stable(hcx, hasher);

        match *self {
            Unknown(t) |
            SizeOverflow(t) => t.hash_stable(hcx, hasher)
        }
    }
}