use crate::common::*;
use crate::context::TypeLowering;
use crate::llvm_util::get_version;
use crate::type_::Type;
use rustc_codegen_ssa::traits::*;
use rustc_middle::bug;
use rustc_middle::ty::layout::{FnAbiOf, LayoutOf, TyAndLayout};
use rustc_middle::ty::print::{with_no_trimmed_paths, with_no_visible_paths};
use rustc_middle::ty::{self, Ty, TypeFoldable};
use rustc_target::abi::{Abi, AddressSpace, Align, FieldsShape};
use rustc_target::abi::{Int, Pointer, F32, F64};
use rustc_target::abi::{PointeeInfo, Scalar, Size, TyAbiInterface, Variants};
use smallvec::{smallvec, SmallVec};
use tracing::debug;

use std::fmt::Write;

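/// Computes the LLVM type for a layout that was not found in the cache.
///
/// For ADTs this creates a *named* (identified) struct type, because the
/// definition may be recursive through pointers; the body is filled in later
/// via `defer` (see `llvm_type` below). `field_remapping`, when set, records
/// where each source field landed in the LLVM struct once padding fillers
/// were inserted.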
fn uncached_llvm_type<'a, 'tcx>(
    cx: &CodegenCx<'a, 'tcx>,
    layout: TyAndLayout<'tcx>,
    defer: &mut Option<(&'a Type, TyAndLayout<'tcx>)>,
    field_remapping: &mut Option<SmallVec<[u32; 4]>>,
) -> &'a Type {
    match layout.abi {
        Abi::Scalar(_) => bug!("handled elsewhere"),
        Abi::Vector { element, count } => {
            let element = layout.scalar_llvm_type_at(cx, element, Size::ZERO);
            return cx.type_vector(element, count);
        }
        Abi::ScalarPair(..) => {
            return cx.type_struct(
                &[
                    layout.scalar_pair_element_llvm_type(cx, 0, false),
                    layout.scalar_pair_element_llvm_type(cx, 1, false),
                ],
                false,
            );
        }
        Abi::Uninhabited | Abi::Aggregate { .. } => {}
    }

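    // From here on the layout is an aggregate (or uninhabited); decide whether
    // it gets a named, identified struct type or an anonymous literal one.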
    let name = match layout.ty.kind() {
        // FIXME(eddyb) producing readable type names for trait objects can result
        // in problematically distinct types due to HRTB and subtyping (see #47638).
        // ty::Dynamic(..) |
        ty::Adt(..) | ty::Closure(..) | ty::Foreign(..) | ty::Generator(..) | ty::Str
            // For performance reasons we use names only when emitting LLVM IR. Unless we are on
            // LLVM < 14, where the use of unnamed types resulted in various issues, e.g., #76213,
            // #79564, and #79246.
            if get_version() < (14, 0, 0) || !cx.sess().fewer_names() =>
        {
            let mut name = with_no_visible_paths!(with_no_trimmed_paths!(layout.ty.to_string()));
            if let (&ty::Adt(def, _), &Variants::Single { index }) =
                (layout.ty.kind(), &layout.variants)
            {
                if def.is_enum() && !def.variants().is_empty() {
                    write!(&mut name, "::{}", def.variant(index).name).unwrap();
                }
            }
            if let (&ty::Generator(_, _, _), &Variants::Single { index }) =
                (layout.ty.kind(), &layout.variants)
            {
                write!(&mut name, "::{}", ty::GeneratorSubsts::variant_name(index)).unwrap();
            }
            Some(name)
        }
        // Use identified structure types for ADT. Due to pointee types in LLVM IR their definition
        // might be recursive. Other cases are non-recursive and we can use literal structure types.
        ty::Adt(..) => Some(String::new()),
        _ => None,
    };

    match layout.fields {
        FieldsShape::Primitive | FieldsShape::Union(_) => {
            let fill = cx.type_padding_filler(layout.size, layout.align.abi);
            let packed = false;
            match name {
                None => cx.type_struct(&[fill], packed),
                Some(ref name) => {
                    let llty = cx.type_named_struct(name);
                    cx.set_struct_body(llty, &[fill], packed);
                    llty
                }
            }
        }
        FieldsShape::Array { count, .. } => cx.type_array(layout.field(cx, 0).llvm_type(cx), count),
        FieldsShape::Arbitrary { .. } => match name {
            None => {
                let (llfields, packed, new_field_remapping) = struct_llfields(cx, layout);
                *field_remapping = new_field_remapping;
                cx.type_struct(&llfields, packed)
            }
            Some(ref name) => {
                let llty = cx.type_named_struct(name);
                // Defer filling in the struct body so that recursive types can
                // refer to the still-opaque named struct (see `llvm_type`).
                *defer = Some((llty, layout));
                llty
            }
        },
    }
}

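/// Builds the flat list of LLVM field types for a struct layout, inserting
/// explicit padding fillers so every field lands at its target offset.
/// Returns the field types, whether the LLVM struct must be packed, and an
/// optional remapping from source field index to LLVM field index (only
/// `Some` when padding fillers shifted the indices).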
fn struct_llfields<'a, 'tcx>(
    cx: &CodegenCx<'a, 'tcx>,
    layout: TyAndLayout<'tcx>,
) -> (Vec<&'a Type>, bool, Option<SmallVec<[u32; 4]>>) {
    debug!("struct_llfields: {:#?}", layout);
    let field_count = layout.fields.count();

    let mut packed = false;
    let mut offset = Size::ZERO;
    let mut prev_effective_align = layout.align.abi;
    let mut result: Vec<_> = Vec::with_capacity(1 + field_count * 2);
    let mut field_remapping = smallvec![0; field_count];
    for i in layout.fields.index_by_increasing_offset() {
        let target_offset = layout.fields.offset(i as usize);
        let field = layout.field(cx, i);
        let effective_field_align =
            layout.align.abi.min(field.align.abi).restrict_for_offset(target_offset);
        packed |= effective_field_align < field.align.abi;

        debug!(
            "struct_llfields: {}: {:?} offset: {:?} target_offset: {:?} \
                effective_field_align: {}",
            i,
            field,
            offset,
            target_offset,
            effective_field_align.bytes()
        );
        assert!(target_offset >= offset);
        let padding = target_offset - offset;
        if padding != Size::ZERO {
            let padding_align = prev_effective_align.min(effective_field_align);
            assert_eq!(offset.align_to(padding_align) + padding, target_offset);
            result.push(cx.type_padding_filler(padding, padding_align));
            debug!("    padding before: {:?}", padding);
        }
        field_remapping[i] = result.len() as u32;
        result.push(field.llvm_type(cx));
        offset = target_offset + field.size;
        prev_effective_align = effective_field_align;
    }
    let padding_used = result.len() > field_count;
    if !layout.is_unsized() && field_count > 0 {
        if offset > layout.size {
            bug!("layout: {:#?} stride: {:?} offset: {:?}", layout, layout.size, offset);
        }
        let padding = layout.size - offset;
        if padding != Size::ZERO {
            let padding_align = prev_effective_align;
            assert_eq!(offset.align_to(padding_align) + padding, layout.size);
            debug!(
                "struct_llfields: pad_bytes: {:?} offset: {:?} stride: {:?}",
                padding, offset, layout.size
            );
            result.push(cx.type_padding_filler(padding, padding_align));
        }
    } else {
        debug!("struct_llfields: offset: {:?} stride: {:?}", offset, layout.size);
    }
    let field_remapping = if padding_used { Some(field_remapping) } else { None };
    (result, packed, field_remapping)
}

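// Example (illustrative, not from the original source): for
// `#[repr(C)] struct S { a: u16, b: u32 }` the fields sit at offsets 0 and 4,
// so `struct_llfields` emits `a`, a 2-byte padding filler, then `b`, and
// returns `field_remapping == Some([0, 2])`: the filler occupies LLVM field
// index 1, shifting `b` from source index 1 to LLVM index 2.
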
impl<'a, 'tcx> CodegenCx<'a, 'tcx> {
    pub fn align_of(&self, ty: Ty<'tcx>) -> Align {
        self.layout_of(ty).align.abi
    }

    pub fn size_of(&self, ty: Ty<'tcx>) -> Size {
        self.layout_of(ty).size
    }

    pub fn size_and_align_of(&self, ty: Ty<'tcx>) -> (Size, Align) {
        let layout = self.layout_of(ty);
        (layout.size, layout.align.abi)
    }
}

pub trait LayoutLlvmExt<'tcx> {
    fn is_llvm_immediate(&self) -> bool;
    fn is_llvm_scalar_pair(&self) -> bool;
    fn llvm_type<'a>(&self, cx: &CodegenCx<'a, 'tcx>) -> &'a Type;
    fn immediate_llvm_type<'a>(&self, cx: &CodegenCx<'a, 'tcx>) -> &'a Type;
    fn scalar_llvm_type_at<'a>(
        &self,
        cx: &CodegenCx<'a, 'tcx>,
        scalar: Scalar,
        offset: Size,
    ) -> &'a Type;
    fn scalar_pair_element_llvm_type<'a>(
        &self,
        cx: &CodegenCx<'a, 'tcx>,
        index: usize,
        immediate: bool,
    ) -> &'a Type;
    fn llvm_field_index<'a>(&self, cx: &CodegenCx<'a, 'tcx>, index: usize) -> u64;
    fn pointee_info_at<'a>(&self, cx: &CodegenCx<'a, 'tcx>, offset: Size) -> Option<PointeeInfo>;
}

impl<'tcx> LayoutLlvmExt<'tcx> for TyAndLayout<'tcx> {
    fn is_llvm_immediate(&self) -> bool {
        match self.abi {
            Abi::Scalar(_) | Abi::Vector { .. } => true,
            Abi::ScalarPair(..) => false,
            Abi::Uninhabited | Abi::Aggregate { .. } => self.is_zst(),
        }
    }

    fn is_llvm_scalar_pair(&self) -> bool {
        match self.abi {
            Abi::ScalarPair(..) => true,
            Abi::Uninhabited | Abi::Scalar(_) | Abi::Vector { .. } | Abi::Aggregate { .. } => false,
        }
    }

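    // Example (illustrative): `i64` is an LLVM immediate, `(i32, i32)` is a
    // scalar pair, and `[u8; 16]` is neither (it is passed around by
    // reference to memory).
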
    /// Gets the LLVM type corresponding to a Rust type, i.e., `rustc_middle::ty::Ty`.
    /// The pointee type of the pointer in `PlaceRef` is always this type.
    /// For sized types, it is also the right LLVM type for an `alloca`
    /// containing a value of that type, and most immediates (except `bool`).
    /// Unsized types, however, are represented by a "minimal unit", e.g.
    /// `[T]` becomes `T`, while `str` and `Trait` turn into `i8` - this
    /// is useful for indexing slices, as `&[T]`'s data pointer is `T*`.
    /// If the type is an unsized struct, the regular layout is generated,
    /// with the inner-most trailing unsized field using the "minimal unit"
    /// of that field's type - this is useful for taking the address of
    /// that field and ensuring the struct has the right alignment.
    fn llvm_type<'a>(&self, cx: &CodegenCx<'a, 'tcx>) -> &'a Type {
        if let Abi::Scalar(scalar) = self.abi {
            // Use a different cache for scalars because pointers to DSTs
            // can be either fat or thin (data pointers of fat pointers).
            if let Some(&llty) = cx.scalar_lltypes.borrow().get(&self.ty) {
                return llty;
            }
            let llty = match *self.ty.kind() {
                ty::Ref(_, ty, _) | ty::RawPtr(ty::TypeAndMut { ty, .. }) => {
                    cx.type_ptr_to(cx.layout_of(ty).llvm_type(cx))
                }
                ty::Adt(def, _) if def.is_box() => {
                    cx.type_ptr_to(cx.layout_of(self.ty.boxed_ty()).llvm_type(cx))
                }
                ty::FnPtr(sig) => {
                    cx.fn_ptr_backend_type(cx.fn_abi_of_fn_ptr(sig, ty::List::empty()))
                }
                _ => self.scalar_llvm_type_at(cx, scalar, Size::ZERO),
            };
            cx.scalar_lltypes.borrow_mut().insert(self.ty, llty);
            return llty;
        }

        // Check the cache.
        let variant_index = match self.variants {
            Variants::Single { index } => Some(index),
            _ => None,
        };
        if let Some(llty) = cx.type_lowering.borrow().get(&(self.ty, variant_index)) {
            return llty.lltype;
        }

        debug!("llvm_type({:#?})", self);

        assert!(!self.ty.has_escaping_bound_vars(), "{:?} has escaping bound vars", self.ty);

        // Make sure lifetimes are erased, to avoid generating distinct LLVM
        // types for Rust types that only differ in the choice of lifetimes.
        let normal_ty = cx.tcx.erase_regions(self.ty);

        let mut defer = None;
        let mut field_remapping = None;
        let llty = if self.ty != normal_ty {
            let mut layout = cx.layout_of(normal_ty);
            if let Some(v) = variant_index {
                layout = layout.for_variant(cx, v);
            }
            layout.llvm_type(cx)
        } else {
            uncached_llvm_type(cx, *self, &mut defer, &mut field_remapping)
        };
        debug!("--> mapped {:#?} to llty={:?}", self, llty);

        cx.type_lowering
            .borrow_mut()
            .insert((self.ty, variant_index), TypeLowering { lltype: llty, field_remapping });

        // If an ADT produced a named struct above, fill in its body now that
        // the (possibly recursive) entry is in the cache.
        if let Some((llty, layout)) = defer {
            let (llfields, packed, new_field_remapping) = struct_llfields(cx, layout);
            cx.set_struct_body(llty, &llfields, packed);
            cx.type_lowering
                .borrow_mut()
                .get_mut(&(self.ty, variant_index))
                .unwrap()
                .field_remapping = new_field_remapping;
        }
        llty
    }

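    // Example (illustrative): lowering `struct Node { next: Option<Box<Node>> }`
    // caches a named, still-opaque struct for `Node` first, so the recursive
    // pointer inside can refer to it; the deferred `struct_llfields` call above
    // then fills in the body.
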
    fn immediate_llvm_type<'a>(&self, cx: &CodegenCx<'a, 'tcx>) -> &'a Type {
        if let Abi::Scalar(scalar) = self.abi {
            if scalar.is_bool() {
                return cx.type_i1();
            }
        }
        self.llvm_type(cx)
    }

    fn scalar_llvm_type_at<'a>(
        &self,
        cx: &CodegenCx<'a, 'tcx>,
        scalar: Scalar,
        offset: Size,
    ) -> &'a Type {
        match scalar.primitive() {
            Int(i, _) => cx.type_from_integer(i),
            F32 => cx.type_f32(),
            F64 => cx.type_f64(),
            Pointer => {
                // If we know the alignment, pick something better than i8.
                let (pointee, address_space) =
                    if let Some(pointee) = self.pointee_info_at(cx, offset) {
                        (cx.type_pointee_for_align(pointee.align), pointee.address_space)
                    } else {
                        (cx.type_i8(), AddressSpace::DATA)
                    };
                cx.type_ptr_to_ext(pointee, address_space)
            }
        }
    }

    fn scalar_pair_element_llvm_type<'a>(
        &self,
        cx: &CodegenCx<'a, 'tcx>,
        index: usize,
        immediate: bool,
    ) -> &'a Type {
        // HACK(eddyb) special-case fat pointers until LLVM removes
        // pointee types, to avoid bitcasting every `OperandRef::deref`.
        match self.ty.kind() {
            ty::Ref(..) | ty::RawPtr(_) => {
                return self.field(cx, index).llvm_type(cx);
            }
            // only wide pointer boxes are handled as pointers
            // thin pointer boxes with scalar allocators are handled by the general logic below
            ty::Adt(def, substs) if def.is_box() && cx.layout_of(substs.type_at(1)).is_zst() => {
                let ptr_ty = cx.tcx.mk_mut_ptr(self.ty.boxed_ty());
                return cx.layout_of(ptr_ty).scalar_pair_element_llvm_type(cx, index, immediate);
            }
            _ => {}
        }

        let Abi::ScalarPair(a, b) = self.abi else {
            bug!("TyAndLayout::scalar_pair_element_llty({:?}): not applicable", self);
        };
        let scalar = [a, b][index];

        // Make sure to return the same type `immediate_llvm_type` would when
        // dealing with an immediate pair. This means that `(bool, bool)` is
        // effectively represented as `{i8, i8}` in memory and two `i1`s as an
        // immediate, just like `bool` is typically `i8` in memory and only `i1`
        // when immediate. We need to load/store `bool` as `i8` to avoid
        // crippling LLVM optimizations or triggering other LLVM bugs with `i1`.
        if immediate && scalar.is_bool() {
            return cx.type_i1();
        }

        let offset = if index == 0 { Size::ZERO } else { a.size(cx).align_to(b.align(cx).abi) };
        self.scalar_llvm_type_at(cx, scalar, offset)
    }

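    // Example (illustrative): for `(i32, bool)` the pair elements are `i32`
    // at offset 0 and `bool` at offset 4; element 1 lowers to `i8` in memory
    // but to `i1` when `immediate` is set, matching `immediate_llvm_type`.
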
    fn llvm_field_index<'a>(&self, cx: &CodegenCx<'a, 'tcx>, index: usize) -> u64 {
        match self.abi {
            Abi::Scalar(_) | Abi::ScalarPair(..) => {
                bug!("TyAndLayout::llvm_field_index({:?}): not applicable", self)
            }
            _ => {}
        }
        match self.fields {
            FieldsShape::Primitive | FieldsShape::Union(_) => {
                bug!("TyAndLayout::llvm_field_index({:?}): not applicable", self)
            }

            FieldsShape::Array { .. } => index as u64,

            FieldsShape::Arbitrary { .. } => {
                let variant_index = match self.variants {
                    Variants::Single { index } => Some(index),
                    _ => None,
                };

                // Look up llvm field if indexes do not match memory order due to padding. If
                // `field_remapping` is `None` no padding was used and the llvm field index
                // matches the memory index.
                match cx.type_lowering.borrow().get(&(self.ty, variant_index)) {
                    Some(TypeLowering { field_remapping: Some(ref remap), .. }) => {
                        remap[index] as u64
                    }
                    Some(_) => self.fields.memory_index(index) as u64,
                    None => {
                        bug!("TyAndLayout::llvm_field_index({:?}): type info not found", self)
                    }
                }
            }
        }
    }

    // FIXME(eddyb) this having the same name as `TyAndLayout::pointee_info_at`
    // (the inherent method, which is lacking this caching logic) can result in
    // the uncached version being called - not wrong, but potentially inefficient.
    fn pointee_info_at<'a>(&self, cx: &CodegenCx<'a, 'tcx>, offset: Size) -> Option<PointeeInfo> {
        if let Some(&pointee) = cx.pointee_infos.borrow().get(&(self.ty, offset)) {
            return pointee;
        }

        let result = Ty::ty_and_layout_pointee_info_at(*self, cx, offset);

        cx.pointee_infos.borrow_mut().insert((self.ty, offset), result);
        result
    }
}