return;
}
let TyAndLayout { ty, layout } = place.layout();
- let rustc_target::abi::Layout { size, align, abi: _, variants: _, fields: _, largest_niche: _ } =
- layout;
+ let rustc_target::abi::LayoutS {
+ size,
+ align,
+ abi: _,
+ variants: _,
+ fields: _,
+ largest_niche: _,
+ } = layout.0.0;
let (kind, extra) = match *place.inner() {
CPlaceInner::Var(place_local, var) => {
};
raw_eq, (v lhs_ref, v rhs_ref) {
- let size = fx.layout_of(substs.type_at(0)).layout.size;
+ let size = fx.layout_of(substs.type_at(0)).layout.size();
// FIXME add and use emit_small_memcmp
let is_eq_value =
if size == Size::ZERO {
use rustc_target::abi::Abi::*;
let tp_ty = substs.type_at(0);
let layout = self.layout_of(tp_ty).layout;
- let _use_integer_compare = match layout.abi {
+ let _use_integer_compare = match layout.abi() {
Scalar(_) | ScalarPair(_, _) => true,
Uninhabited | Vector { .. } => false,
Aggregate { .. } => {
// For rusty ABIs, small aggregates are actually passed
// as `RegKind::Integer` (see `FnAbi::adjust_for_abi`),
// so we re-use that same threshold here.
- layout.size <= self.data_layout().pointer_size * 2
+ layout.size() <= self.data_layout().pointer_size * 2
}
};
let a = args[0].immediate();
let b = args[1].immediate();
- if layout.size.bytes() == 0 {
+ if layout.size().bytes() == 0 {
self.const_bool(true)
}
/*else if use_integer_compare {
let void_ptr_type = self.context.new_type::<*const ()>();
let a_ptr = self.bitcast(a, void_ptr_type);
let b_ptr = self.bitcast(b, void_ptr_type);
- let n = self.context.new_cast(None, self.const_usize(layout.size.bytes()), self.sizet_type);
+ let n = self.context.new_cast(None, self.const_usize(layout.size().bytes()), self.sizet_type);
let builtin = self.context.get_builtin_function("memcmp");
let cmp = self.context.new_call(None, builtin, &[a_ptr, b_ptr, n]);
self.icmp(IntPredicate::IntEQ, cmp, self.const_i32(0))
use abi::Abi::*;
let tp_ty = substs.type_at(0);
let layout = self.layout_of(tp_ty).layout;
- let use_integer_compare = match layout.abi {
+ let use_integer_compare = match layout.abi() {
Scalar(_) | ScalarPair(_, _) => true,
Uninhabited | Vector { .. } => false,
Aggregate { .. } => {
// For rusty ABIs, small aggregates are actually passed
// as `RegKind::Integer` (see `FnAbi::adjust_for_abi`),
// so we re-use that same threshold here.
- layout.size <= self.data_layout().pointer_size * 2
+ layout.size() <= self.data_layout().pointer_size * 2
}
};
let a = args[0].immediate();
let b = args[1].immediate();
- if layout.size.bytes() == 0 {
+ if layout.size().bytes() == 0 {
self.const_bool(true)
} else if use_integer_compare {
- let integer_ty = self.type_ix(layout.size.bits());
+ let integer_ty = self.type_ix(layout.size().bits());
let ptr_ty = self.type_ptr_to(integer_ty);
let a_ptr = self.bitcast(a, ptr_ty);
- let a_val = self.load(integer_ty, a_ptr, layout.align.abi);
+ let a_val = self.load(integer_ty, a_ptr, layout.align().abi);
let b_ptr = self.bitcast(b, ptr_ty);
- let b_val = self.load(integer_ty, b_ptr, layout.align.abi);
+ let b_val = self.load(integer_ty, b_ptr, layout.align().abi);
self.icmp(IntPredicate::IntEQ, a_val, b_val)
} else {
let i8p_ty = self.type_i8p();
let a_ptr = self.bitcast(a, i8p_ty);
let b_ptr = self.bitcast(b, i8p_ty);
- let n = self.const_usize(layout.size.bytes());
+ let n = self.const_usize(layout.size().bytes());
let cmp = self.call_intrinsic("memcmp", &[a_ptr, b_ptr, n]);
self.icmp(IntPredicate::IntEQ, cmp, self.const_i32(0))
}
// calculate the range of values for the dataful variant
let dataful_discriminant_range =
- dataful_variant_layout.largest_niche.unwrap().scalar.valid_range;
+ dataful_variant_layout.largest_niche().unwrap().scalar.valid_range;
let min = dataful_discriminant_range.start;
let min = tag.value.size(&tcx).truncate(min);
+use crate::stable_hasher::{HashStable, StableHasher};
use std::cmp::Ordering;
use std::hash::{Hash, Hasher};
use std::ops::Deref;
}
}
+/// Hash the referenced value rather than the pointer, so the stable hash
+/// does not depend on the address the value happened to be interned at.
+impl<T, CTX> HashStable<CTX> for Interned<'_, T>
+where
+ T: HashStable<CTX>,
+{
+ fn hash_stable(&self, hcx: &mut CTX, hasher: &mut StableHasher) {
+ self.0.hash_stable(hcx, hasher);
+ }
+}
+
#[cfg(test)]
mod tests;
let compare_layouts = |a, b| -> Result<bool, LayoutError<'tcx>> {
debug!("compare_layouts({:?}, {:?})", a, b);
- let a_layout = &cx.layout_of(a)?.layout.abi;
- let b_layout = &cx.layout_of(b)?.layout.abi;
+ let a_layout = &cx.layout_of(a)?.layout.abi();
+ let b_layout = &cx.layout_of(b)?.layout.abi();
debug!(
"comparing layouts: {:?} == {:?} = {}",
a_layout,
let (largest, slargest, largest_index) = iter::zip(enum_definition.variants, variants)
.map(|(variant, variant_layout)| {
// Subtract the size of the enum tag.
- let bytes = variant_layout.size.bytes().saturating_sub(tag_size);
+ let bytes = variant_layout.size().bytes().saturating_sub(tag_size);
debug!("- variant `{}` is {} bytes large", variant.ident, bytes);
bytes
.layout;
// In both stdcall and fastcall, we always round up the argument size to the
// nearest multiple of 4 bytes.
- (layout.size.bytes_usize() + 3) & !3
+ (layout.size().bytes_usize() + 3) & !3
})
.sum()
}
macro_rules! arena_types {
($macro:path) => (
$macro!([
- [] layout: rustc_target::abi::Layout,
+ [] layout: rustc_target::abi::LayoutS<'tcx>,
[] fn_abi: rustc_target::abi::call::FnAbi<'tcx, rustc_middle::ty::Ty<'tcx>>,
// AdtDef are interned and compared by address
[decode] adt_def: rustc_middle::ty::AdtDef,
use rustc_span::source_map::{MultiSpan, SourceMap};
use rustc_span::symbol::{kw, sym, Ident, Symbol};
use rustc_span::{Span, DUMMY_SP};
-use rustc_target::abi::{Layout, TargetDataLayout, VariantIdx};
+use rustc_target::abi::{Layout, LayoutS, TargetDataLayout, VariantIdx};
use rustc_target::spec::abi;
use rustc_type_ir::TypeFlags;
const_: InternedSet<'tcx, ConstS<'tcx>>,
const_allocation: InternedSet<'tcx, Allocation>,
bound_variable_kinds: InternedSet<'tcx, List<ty::BoundVariableKind>>,
- layout: InternedSet<'tcx, Layout>,
+ layout: InternedSet<'tcx, LayoutS<'tcx>>,
adt_def: InternedSet<'tcx, AdtDef>,
}
region: mk_region(RegionKind): Region -> Region<'tcx>,
const_: mk_const(ConstS<'tcx>): Const -> Const<'tcx>,
const_allocation: intern_const_alloc(Allocation): ConstAllocation -> ConstAllocation<'tcx>,
+ layout: intern_layout(LayoutS<'tcx>): Layout -> Layout<'tcx>,
}
macro_rules! direct_interners_old {
// FIXME: eventually these should all be converted to `direct_interners`.
direct_interners_old! {
- layout: intern_layout(Layout),
adt_def: intern_adt_def(AdtDef),
}
use crate::ty::{self, subst::SubstsRef, ReprOptions, Ty, TyCtxt, TypeFoldable};
use rustc_ast as ast;
use rustc_attr as attr;
+use rustc_data_structures::intern::Interned;
use rustc_hir as hir;
use rustc_hir::lang_items::LangItem;
use rustc_index::bit_set::BitSet;
}
impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
- fn scalar_pair(&self, a: Scalar, b: Scalar) -> Layout {
+ fn scalar_pair(&self, a: Scalar, b: Scalar) -> LayoutS<'tcx> {
let dl = self.data_layout();
let b_align = b.value.align(dl);
let align = a.value.align(dl).max(b_align).max(dl.aggregate_align);
.chain(Niche::from_scalar(dl, Size::ZERO, a))
.max_by_key(|niche| niche.available(dl));
- Layout {
+ LayoutS {
variants: Variants::Single { index: VariantIdx::new(0) },
fields: FieldsShape::Arbitrary {
offsets: vec![Size::ZERO, b_offset],
fields: &[TyAndLayout<'_>],
repr: &ReprOptions,
kind: StructKind,
- ) -> Result<Layout, LayoutError<'tcx>> {
+ ) -> Result<LayoutS<'tcx>, LayoutError<'tcx>> {
let dl = self.data_layout();
let pack = repr.pack;
if pack.is_some() && repr.align.is_some() {
// Two non-ZST fields, and they're both scalars.
(
- Some((i, &TyAndLayout { layout: &Layout { abi: Abi::Scalar(a), .. }, .. })),
- Some((j, &TyAndLayout { layout: &Layout { abi: Abi::Scalar(b), .. }, .. })),
+ Some((
+ i,
+ &TyAndLayout {
+ layout: Layout(Interned(&LayoutS { abi: Abi::Scalar(a), .. }, _)),
+ ..
+ },
+ )),
+ Some((
+ j,
+ &TyAndLayout {
+ layout: Layout(Interned(&LayoutS { abi: Abi::Scalar(b), .. }, _)),
+ ..
+ },
+ )),
None,
) => {
// Order by the memory placement, not source order.
abi = Abi::Uninhabited;
}
- Ok(Layout {
+ Ok(LayoutS {
variants: Variants::Single { index: VariantIdx::new(0) },
fields: FieldsShape::Arbitrary { offsets, memory_index },
abi,
})
}
- fn layout_of_uncached(&self, ty: Ty<'tcx>) -> Result<&'tcx Layout, LayoutError<'tcx>> {
+ fn layout_of_uncached(&self, ty: Ty<'tcx>) -> Result<Layout<'tcx>, LayoutError<'tcx>> {
let tcx = self.tcx;
let param_env = self.param_env;
let dl = self.data_layout();
assert!(size.bits() <= 128);
Scalar { value, valid_range: WrappingRange { start: 0, end: size.unsigned_int_max() } }
};
- let scalar = |value: Primitive| tcx.intern_layout(Layout::scalar(self, scalar_unit(value)));
+ let scalar =
+ |value: Primitive| tcx.intern_layout(LayoutS::scalar(self, scalar_unit(value)));
let univariant = |fields: &[TyAndLayout<'_>], repr: &ReprOptions, kind| {
Ok(tcx.intern_layout(self.univariant_uninterned(ty, fields, repr, kind)?))
Ok(match *ty.kind() {
// Basic scalars.
- ty::Bool => tcx.intern_layout(Layout::scalar(
+ ty::Bool => tcx.intern_layout(LayoutS::scalar(
self,
Scalar { value: Int(I8, false), valid_range: WrappingRange { start: 0, end: 1 } },
)),
- ty::Char => tcx.intern_layout(Layout::scalar(
+ ty::Char => tcx.intern_layout(LayoutS::scalar(
self,
Scalar {
value: Int(I32, false),
ty::FnPtr(_) => {
let mut ptr = scalar_unit(Pointer);
ptr.valid_range = ptr.valid_range.with_start(1);
- tcx.intern_layout(Layout::scalar(self, ptr))
+ tcx.intern_layout(LayoutS::scalar(self, ptr))
}
// The never type.
- ty::Never => tcx.intern_layout(Layout {
+ ty::Never => tcx.intern_layout(LayoutS {
variants: Variants::Single { index: VariantIdx::new(0) },
fields: FieldsShape::Primitive,
abi: Abi::Uninhabited,
let pointee = tcx.normalize_erasing_regions(param_env, pointee);
if pointee.is_sized(tcx.at(DUMMY_SP), param_env) {
- return Ok(tcx.intern_layout(Layout::scalar(self, data_ptr)));
+ return Ok(tcx.intern_layout(LayoutS::scalar(self, data_ptr)));
}
let unsized_part = tcx.struct_tail_erasing_lifetimes(pointee, param_env);
let metadata = match unsized_part.kind() {
ty::Foreign(..) => {
- return Ok(tcx.intern_layout(Layout::scalar(self, data_ptr)));
+ return Ok(tcx.intern_layout(LayoutS::scalar(self, data_ptr)));
}
ty::Slice(_) | ty::Str => scalar_unit(Int(dl.ptr_sized_integer(), false)),
ty::Dynamic(..) => {
let largest_niche = if count != 0 { element.largest_niche } else { None };
- tcx.intern_layout(Layout {
+ tcx.intern_layout(LayoutS {
variants: Variants::Single { index: VariantIdx::new(0) },
fields: FieldsShape::Array { stride: element.size, count },
abi,
}
ty::Slice(element) => {
let element = self.layout_of(element)?;
- tcx.intern_layout(Layout {
+ tcx.intern_layout(LayoutS {
variants: Variants::Single { index: VariantIdx::new(0) },
fields: FieldsShape::Array { stride: element.size, count: 0 },
abi: Abi::Aggregate { sized: false },
size: Size::ZERO,
})
}
- ty::Str => tcx.intern_layout(Layout {
+ ty::Str => tcx.intern_layout(LayoutS {
variants: Variants::Single { index: VariantIdx::new(0) },
fields: FieldsShape::Array { stride: Size::from_bytes(1), count: 0 },
abi: Abi::Aggregate { sized: false },
// Extract the number of elements from the layout of the array field:
let Ok(TyAndLayout {
- layout: Layout { fields: FieldsShape::Array { count, .. }, .. },
+ layout: Layout(Interned(LayoutS { fields: FieldsShape::Array { count, .. }, .. }, _)),
..
}) = self.layout_of(f0_ty) else {
return Err(LayoutError::Unknown(ty));
FieldsShape::Array { stride: e_ly.size, count: e_len }
};
- tcx.intern_layout(Layout {
+ tcx.intern_layout(LayoutS {
variants: Variants::Single { index: VariantIdx::new(0) },
fields,
abi: Abi::Vector { element: e_abi, count: e_len },
align = align.min(AbiAndPrefAlign::new(pack));
}
- return Ok(tcx.intern_layout(Layout {
+ return Ok(tcx.intern_layout(LayoutS {
variants: Variants::Single { index },
fields: FieldsShape::Union(
NonZeroUsize::new(variants[index].len())
align = align.max(st.align);
- Ok(st)
+ Ok(tcx.intern_layout(st))
})
.collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
- let offset = st[i].fields.offset(field_index) + niche.offset;
- let size = st[i].size;
+ let offset = st[i].fields().offset(field_index) + niche.offset;
+ let size = st[i].size();
- let abi = if st.iter().all(|v| v.abi.is_uninhabited()) {
+ let abi = if st.iter().all(|v| v.abi().is_uninhabited()) {
Abi::Uninhabited
} else {
- match st[i].abi {
+ match st[i].abi() {
Abi::Scalar(_) => Abi::Scalar(niche_scalar),
Abi::ScalarPair(first, second) => {
// We need to use scalar_unit to reset the
let largest_niche = Niche::from_scalar(dl, offset, niche_scalar);
- niche_filling_layout = Some(Layout {
+ niche_filling_layout = Some(LayoutS {
variants: Variants::Multiple {
tag: niche_scalar,
tag_encoding: TagEncoding::Niche {
let largest_niche = Niche::from_scalar(dl, Size::ZERO, tag);
- let tagged_layout = Layout {
+ let layout_variants =
+ layout_variants.into_iter().map(|v| tcx.intern_layout(v)).collect();
+
+ let tagged_layout = LayoutS {
variants: Variants::Multiple {
tag,
tag_encoding: TagEncoding::Direct,
ty: Ty<'tcx>,
def_id: hir::def_id::DefId,
substs: SubstsRef<'tcx>,
- ) -> Result<&'tcx Layout, LayoutError<'tcx>> {
+ ) -> Result<Layout<'tcx>, LayoutError<'tcx>> {
use SavedLocalEligibility::*;
let tcx = self.tcx;
let subst_field = |ty: Ty<'tcx>| ty.subst(tcx, substs);
value: Primitive::Int(discr_int, false),
valid_range: WrappingRange { start: 0, end: max_discr },
};
- let tag_layout = self.tcx.intern_layout(Layout::scalar(self, tag));
+ let tag_layout = self.tcx.intern_layout(LayoutS::scalar(self, tag));
let tag_layout = TyAndLayout { ty: discr_int_ty, layout: tag_layout };
let promoted_layouts = ineligible_locals
size = size.max(variant.size);
align = align.max(variant.align);
- Ok(variant)
+ Ok(tcx.intern_layout(variant))
})
.collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
size = size.align_to(align.abi);
- let abi = if prefix.abi.is_uninhabited() || variants.iter().all(|v| v.abi.is_uninhabited())
- {
- Abi::Uninhabited
- } else {
- Abi::Aggregate { sized: true }
- };
+ let abi =
+ if prefix.abi.is_uninhabited() || variants.iter().all(|v| v.abi().is_uninhabited()) {
+ Abi::Uninhabited
+ } else {
+ Abi::Aggregate { sized: true }
+ };
- let layout = tcx.intern_layout(Layout {
+ let layout = tcx.intern_layout(LayoutS {
variants: Variants::Multiple {
tag,
tag_encoding: TagEncoding::Direct,
ty::Adt(def, _) => def.variants[variant_index].fields.len(),
_ => bug!(),
};
- tcx.intern_layout(Layout {
+ tcx.intern_layout(LayoutS {
variants: Variants::Single { index: variant_index },
fields: match NonZeroUsize::new(fields) {
Some(fields) => FieldsShape::Union(fields),
})
}
- Variants::Multiple { ref variants, .. } => &variants[variant_index],
+ Variants::Multiple { ref variants, .. } => variants[variant_index],
};
- assert_eq!(layout.variants, Variants::Single { index: variant_index });
+ assert_eq!(*layout.variants(), Variants::Single { index: variant_index });
TyAndLayout { ty: this.ty, layout }
}
) -> TyMaybeWithLayout<'tcx> {
let tcx = cx.tcx();
let tag_layout = |tag: Scalar| -> TyAndLayout<'tcx> {
- let layout = Layout::scalar(cx, tag);
- TyAndLayout { layout: tcx.intern_layout(layout), ty: tag.value.to_ty(tcx) }
+ TyAndLayout {
+ layout: tcx.intern_layout(LayoutS::scalar(cx, tag)),
+ ty: tag.value.to_ty(tcx),
+ }
};
match *this.ty.kind() {
Variants::Multiple { variants, .. } => variants
.iter_enumerated()
.filter_map(|(idx, layout)| {
- (layout.abi != Abi::Uninhabited)
+ (layout.abi() != Abi::Uninhabited)
.then(|| ty.discriminant_for_variant(tcx, idx).unwrap().val)
})
.collect(),
use std::ops::{Add, AddAssign, Deref, Mul, RangeInclusive, Sub};
use std::str::FromStr;
+use rustc_data_structures::intern::Interned;
use rustc_index::vec::{Idx, IndexVec};
use rustc_macros::HashStable_Generic;
use rustc_serialize::json::{Json, ToJson};
}
#[derive(PartialEq, Eq, Hash, Debug, HashStable_Generic)]
-pub enum Variants {
+pub enum Variants<'a> {
/// Single enum variants, structs/tuples, unions, and all non-ADTs.
Single { index: VariantIdx },
tag: Scalar,
tag_encoding: TagEncoding,
tag_field: usize,
- variants: IndexVec<VariantIdx, Layout>,
+ variants: IndexVec<VariantIdx, Layout<'a>>,
},
}
}
}
-#[derive(PartialEq, Eq, Hash, Debug, HashStable_Generic)]
-pub struct Layout {
+#[derive(PartialEq, Eq, Hash, HashStable_Generic)]
+pub struct LayoutS<'a> {
/// Says where the fields are located within the layout.
pub fields: FieldsShape,
///
/// To access all fields of this layout, both `fields` and the fields of the active variant
/// must be taken into account.
- pub variants: Variants,
+ pub variants: Variants<'a>,
/// The `abi` defines how this data is passed between functions, and it defines
/// value restrictions via `valid_range`.
pub size: Size,
}
-impl Layout {
+impl<'a> LayoutS<'a> {
pub fn scalar<C: HasDataLayout>(cx: &C, scalar: Scalar) -> Self {
let largest_niche = Niche::from_scalar(cx, Size::ZERO, scalar);
let size = scalar.value.size(cx);
let align = scalar.value.align(cx);
- Layout {
+ LayoutS {
variants: Variants::Single { index: VariantIdx::new(0) },
fields: FieldsShape::Primitive,
abi: Abi::Scalar(scalar),
}
}
+impl<'a> fmt::Debug for LayoutS<'a> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ // This is how `Layout` used to print before it became
+ // `Interned<LayoutS>`. We print it like this to avoid having to update
+ // expected output in a lot of tests.
+ f.debug_struct("Layout")
+ .field("fields", &self.fields)
+ .field("variants", &self.variants)
+ .field("abi", &self.abi)
+ .field("largest_niche", &self.largest_niche)
+ .field("align", &self.align)
+ .field("size", &self.size)
+ .finish()
+ }
+}
+
+/// A cheap-to-copy, interned handle to a `LayoutS`. Equality and hashing
+/// are derived from the wrapped `Interned`. `rustc_pass_by_value`
+/// (post-bootstrap only) marks the type as intended to be passed by value.
+#[derive(Copy, Clone, PartialEq, Eq, Hash, HashStable_Generic)]
+#[cfg_attr(not(bootstrap), rustc_pass_by_value)]
+pub struct Layout<'a>(pub Interned<'a, LayoutS<'a>>);
+
+impl<'a> fmt::Debug for Layout<'a> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ // Print exactly like the underlying `LayoutS` (see the comment on
+ // `<LayoutS as Debug>::fmt` above), hiding the `Interned` wrapper.
+ self.0.0.fmt(f)
+ }
+}
+
+/// Accessors forwarding to the interned `LayoutS`. They take `self` by
+/// value because `Layout` is `Copy`, and the borrowed returns live for
+/// `'a` (the interner's lifetime) rather than a borrow of `self`.
+impl<'a> Layout<'a> {
+ /// Says where the fields are located within the layout.
+ pub fn fields(self) -> &'a FieldsShape {
+ &self.0.0.fields
+ }
+
+ /// The variant structure (`Single` vs. `Multiple` with a tag).
+ pub fn variants(self) -> &'a Variants<'a> {
+ &self.0.0.variants
+ }
+
+ pub fn abi(self) -> Abi {
+ self.0.0.abi
+ }
+
+ pub fn largest_niche(self) -> Option<Niche> {
+ self.0.0.largest_niche
+ }
+
+ pub fn align(self) -> AbiAndPrefAlign {
+ self.0.0.align
+ }
+
+ pub fn size(self) -> Size {
+ self.0.0.size
+ }
+}
+
/// The layout of a type, alongside the type itself.
/// Provides various type traversal APIs (e.g., recursing into fields).
///
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, HashStable_Generic)]
pub struct TyAndLayout<'a, Ty> {
pub ty: Ty,
- pub layout: &'a Layout,
+ /// The interned layout of `ty`; `Layout<'a>` is itself a reference-like
+ /// handle, so this struct stays `Copy`.
+ pub layout: Layout<'a>,
}
+// Deref through the `Interned` wrapper to the underlying `LayoutS`, so
+// field accesses such as `.fields`, `.size`, `.abi` on a `TyAndLayout`
+// keep working as before the interning change.
impl<'a, Ty> Deref for TyAndLayout<'a, Ty> {
- type Target = &'a Layout;
- fn deref(&self) -> &&'a Layout {
- &self.layout
+ type Target = &'a LayoutS<'a>;
+ fn deref(&self) -> &&'a LayoutS<'a> {
+ &self.layout.0.0
}
}
//! LLVM.
#![doc(html_root_url = "https://doc.rust-lang.org/nightly/nightly-rustc/")]
-#![feature(bool_to_option)]
-#![feature(let_else)]
-#![feature(nll)]
-#![feature(never_type)]
#![feature(associated_type_bounds)]
+#![feature(bool_to_option)]
#![feature(exhaustive_patterns)]
+#![feature(let_else)]
#![feature(min_specialization)]
+#![feature(never_type)]
+#![feature(nll)]
+#![feature(rustc_attrs)]
#![feature(step_trait)]
use std::iter::FromIterator;
}
fn document_type_layout(w: &mut Buffer, cx: &Context<'_>, ty_def_id: DefId) {
+ // Writes a human-readable size for `layout`, subtracting `tag_size`
+ // bytes of enum tag. NOTE(review): assumes `tag_size` <= the layout's
+ // size in bytes; the subtraction underflows otherwise — confirm callers.
- fn write_size_of_layout(w: &mut Buffer, layout: &Layout, tag_size: u64) {
- if layout.abi.is_unsized() {
+ fn write_size_of_layout(w: &mut Buffer, layout: Layout<'_>, tag_size: u64) {
+ if layout.abi().is_unsized() {
write!(w, "(unsized)");
} else {
- let bytes = layout.size.bytes() - tag_size;
+ let bytes = layout.size().bytes() - tag_size;
write!(w, "{size} byte{pl}", size = bytes, pl = if bytes == 1 { "" } else { "s" },);
}
}
write_size_of_layout(w, ty_layout.layout, 0);
writeln!(w, "</p>");
if let Variants::Multiple { variants, tag, tag_encoding, .. } =
- &ty_layout.layout.variants
+ &ty_layout.layout.variants()
{
if !variants.is_empty() {
w.write_str(
for (index, layout) in variants.iter_enumerated() {
let name = adt.variants[index].name;
write!(w, "<li><code>{name}</code>: ", name = name);
- write_size_of_layout(w, layout, tag_size);
+ write_size_of_layout(w, *layout, tag_size);
writeln!(w, "</li>");
}
w.write_str("</ul>");
if let Ok(ty) = cx.tcx.try_normalize_erasing_regions(cx.param_env, ty);
if let Ok(layout) = cx.tcx.layout_of(cx.param_env.and(ty));
then {
- layout.layout.size.bytes() == 0
+ layout.layout.size().bytes() == 0
} else {
false
}