// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

use abi::{FnType, FnTypeExt};
use common::*;
use rustc::hir;
use rustc::ty::{self, Ty, TypeFoldable};
use rustc::ty::layout::{self, Align, LayoutOf, Size, TyLayout};
use rustc_target::abi::FloatTy;
use rustc_mir::monomorphize::item::DefPathBasedNames;
use type_::Type;
use rustc_codegen_ssa::traits::*;

use std::fmt::Write;

fn uncached_llvm_type<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>,
                                layout: TyLayout<'tcx>,
                                defer: &mut Option<(&'a Type, TyLayout<'tcx>)>)
                                -> &'a Type {
    match layout.abi {
        layout::Abi::Scalar(_) => bug!("handled elsewhere"),
        layout::Abi::Vector { ref element, count } => {
            // LLVM has a separate type for 64-bit SIMD vectors on X86 called
            // `x86_mmx` which is needed for some SIMD operations. As a bit of a
            // hack (all SIMD definitions are super unstable anyway) we
            // recognize any one-element SIMD vector as "this should be an
            // x86_mmx" type. In general there shouldn't be a need for other
            // one-element SIMD vectors, so it's assumed this won't clash with
            // much else.
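            //
            // (Illustrative example, not from the original sources: on x86_64 a
            // one-lane, 64-bit SIMD wrapper such as a hypothetical
            // `#[repr(simd)] struct U64x1(u64);` would satisfy the check below
            // and be lowered to `x86_mmx` instead of `<1 x i64>`.)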
            let use_x86_mmx = count == 1 && layout.size.bits() == 64 &&
                (cx.sess().target.target.arch == "x86" ||
                 cx.sess().target.target.arch == "x86_64");
            if use_x86_mmx {
                return cx.type_x86_mmx()
            } else {
                let element = layout.scalar_llvm_type_at(cx, element, Size::ZERO);
                return cx.type_vector(element, count);
            }
        }
        layout::Abi::ScalarPair(..) => {
            return cx.type_struct(&[
                layout.scalar_pair_element_llvm_type(cx, 0, false),
                layout.scalar_pair_element_llvm_type(cx, 1, false),
            ], false);
        }
        layout::Abi::Uninhabited |
        layout::Abi::Aggregate { .. } => {}
    }

    let name = match layout.ty.sty {
        ty::Closure(..) |
        ty::Generator(..) |
        ty::Adt(..) |
        // FIXME(eddyb) producing readable type names for trait objects can result
        // in problematically distinct types due to HRTB and subtyping (see #47638).
        // ty::Dynamic(..) |
        ty::Foreign(..) |
        ty::Str => {
            let mut name = String::with_capacity(32);
            let printer = DefPathBasedNames::new(cx.tcx, true, true);
            printer.push_type_name(layout.ty, &mut name);
            if let (&ty::Adt(def, _), &layout::Variants::Single { index })
                 = (&layout.ty.sty, &layout.variants)
            {
                if def.is_enum() && !def.variants.is_empty() {
                    write!(&mut name, "::{}", def.variants[index].name).unwrap();
                }
            }
            Some(name)
        }
        _ => None
    };

    match layout.fields {
        layout::FieldPlacement::Union(_) => {
            let fill = cx.type_padding_filler(layout.size, layout.align.abi);
            let packed = false;
            match name {
                None => {
                    cx.type_struct(&[fill], packed)
                }
                Some(ref name) => {
                    let llty = cx.type_named_struct(name);
                    cx.set_struct_body(llty, &[fill], packed);
                    llty
                }
            }
        }
        layout::FieldPlacement::Array { count, .. } => {
            cx.type_array(layout.field(cx, 0).llvm_type(cx), count)
        }
        layout::FieldPlacement::Arbitrary { .. } => {
            match name {
                None => {
                    let (llfields, packed) = struct_llfields(cx, layout);
                    cx.type_struct(&llfields, packed)
                }
                Some(ref name) => {
                    let llty = cx.type_named_struct(name);
                    *defer = Some((llty, layout));
                    llty
                }
            }
        }
    }
}

fn struct_llfields<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>,
                             layout: TyLayout<'tcx>)
                             -> (Vec<&'a Type>, bool) {
    debug!("struct_llfields: {:#?}", layout);
    let field_count = layout.fields.count();

    let mut packed = false;
    let mut offset = Size::ZERO;
    let mut prev_effective_align = layout.align.abi;
    let mut result: Vec<_> = Vec::with_capacity(1 + field_count * 2);
    for i in layout.fields.index_by_increasing_offset() {
        let target_offset = layout.fields.offset(i as usize);
        let field = layout.field(cx, i);
        let effective_field_align = layout.align.abi
            .min(field.align.abi)
            .restrict_for_offset(target_offset);
        packed |= effective_field_align < field.align.abi;

        debug!("struct_llfields: {}: {:?} offset: {:?} target_offset: {:?} \
                effective_field_align: {}",
               i, field, offset, target_offset, effective_field_align.bytes());
        assert!(target_offset >= offset);
        let padding = target_offset - offset;
        let padding_align = prev_effective_align.min(effective_field_align);
        assert_eq!(offset.align_to(padding_align) + padding, target_offset);
        result.push(cx.type_padding_filler(padding, padding_align));
        debug!("    padding before: {:?}", padding);

        result.push(field.llvm_type(cx));
        offset = target_offset + field.size;
        prev_effective_align = effective_field_align;
    }
    if !layout.is_unsized() && field_count > 0 {
        if offset > layout.size {
            bug!("layout: {:#?} stride: {:?} offset: {:?}",
                 layout, layout.size, offset);
        }
        let padding = layout.size - offset;
        let padding_align = prev_effective_align;
        assert_eq!(offset.align_to(padding_align) + padding, layout.size);
        debug!("struct_llfields: pad_bytes: {:?} offset: {:?} stride: {:?}",
               padding, offset, layout.size);
        result.push(cx.type_padding_filler(padding, padding_align));
        assert_eq!(result.len(), 1 + field_count * 2);
    } else {
        debug!("struct_llfields: offset: {:?} stride: {:?}",
               offset, layout.size);
    }

    (result, packed)
}
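
// Illustrative sketch (not in the original sources): for a type like
// `#[repr(C)] struct S { a: u8, b: u32 }` on a typical 64-bit target, the
// vector built above would be roughly `[ [0 x i8], i8, [3 x i8], i32, [0 x i8] ]`
// with `packed == false` - a (possibly zero-sized) padding filler before every
// field plus one trailing filler, where the exact filler types depend on the
// chosen padding alignment. This interleaving is what keeps
// `llvm_field_index`'s `1 + memory_index * 2` arithmetic below in sync.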

impl<'a, 'tcx> CodegenCx<'a, 'tcx> {
    pub fn align_of(&self, ty: Ty<'tcx>) -> Align {
        self.layout_of(ty).align.abi
    }

    pub fn size_of(&self, ty: Ty<'tcx>) -> Size {
        self.layout_of(ty).size
    }

    pub fn size_and_align_of(&self, ty: Ty<'tcx>) -> (Size, Align) {
        let layout = self.layout_of(ty);
        (layout.size, layout.align.abi)
    }
}

#[derive(Copy, Clone, PartialEq, Eq)]
pub enum PointerKind {
    /// Most general case, we know no restrictions to tell LLVM.
    Shared,

    /// `&T` where `T` contains no `UnsafeCell`, is `noalias` and `readonly`.
    Frozen,

    /// `&mut T`, when we know `noalias` is safe for LLVM.
    UniqueBorrowed,

    /// `Box<T>`, unlike `UniqueBorrowed`, it also has `noalias` on returns.
    UniqueOwned
}

#[derive(Copy, Clone)]
pub struct PointeeInfo {
    pub size: Size,
    pub align: Align,
    pub safe: Option<PointerKind>,
}

pub trait LayoutLlvmExt<'tcx> {
    fn is_llvm_immediate(&self) -> bool;
    fn is_llvm_scalar_pair<'a>(&self) -> bool;
    fn llvm_type<'a>(&self, cx: &CodegenCx<'a, 'tcx>) -> &'a Type;
    fn immediate_llvm_type<'a>(&self, cx: &CodegenCx<'a, 'tcx>) -> &'a Type;
    fn scalar_llvm_type_at<'a>(&self, cx: &CodegenCx<'a, 'tcx>,
                               scalar: &layout::Scalar, offset: Size) -> &'a Type;
    fn scalar_pair_element_llvm_type<'a>(&self, cx: &CodegenCx<'a, 'tcx>,
                                         index: usize, immediate: bool) -> &'a Type;
    fn llvm_field_index(&self, index: usize) -> u64;
    fn pointee_info_at<'a>(&self, cx: &CodegenCx<'a, 'tcx>, offset: Size)
                           -> Option<PointeeInfo>;
}

impl<'tcx> LayoutLlvmExt<'tcx> for TyLayout<'tcx> {
    fn is_llvm_immediate(&self) -> bool {
        match self.abi {
            layout::Abi::Scalar(_) |
            layout::Abi::Vector { .. } => true,
            layout::Abi::ScalarPair(..) => false,
            layout::Abi::Uninhabited |
            layout::Abi::Aggregate { .. } => self.is_zst()
        }
    }

    fn is_llvm_scalar_pair<'a>(&self) -> bool {
        match self.abi {
            layout::Abi::ScalarPair(..) => true,
            layout::Abi::Uninhabited |
            layout::Abi::Scalar(_) |
            layout::Abi::Vector { .. } |
            layout::Abi::Aggregate { .. } => false
        }
    }

    /// Gets the LLVM type corresponding to a Rust type, i.e. `rustc::ty::Ty`.
    /// The pointee type of the pointer in `PlaceRef` is always this type.
    /// For sized types, it is also the right LLVM type for an `alloca`
    /// containing a value of that type, and most immediates (except `bool`).
    /// Unsized types, however, are represented by a "minimal unit", e.g.
    /// `[T]` becomes `T`, while `str` and `Trait` turn into `i8` - this
    /// is useful for indexing slices, as `&[T]`'s data pointer is `T*`.
    /// If the type is an unsized struct, the regular layout is generated,
    /// with the inner-most trailing unsized field using the "minimal unit"
    /// of that field's type - this is useful for taking the address of
    /// that field and ensuring the struct has the right alignment.
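    ///
    /// A rough sketch of the resulting mapping (illustrative only, assuming a
    /// 64-bit target; this is a compiler-internal API, so the block below is
    /// plain text rather than a doctest):
    ///
    /// ```text
    /// u32          -> i32
    /// (bool, bool) -> { i8, i8 }   (memory/alloca type of the scalar pair)
    /// [T]          -> T            (the "minimal unit" of the slice)
    /// str          -> i8
    /// ```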
    fn llvm_type<'a>(&self, cx: &CodegenCx<'a, 'tcx>) -> &'a Type {
        if let layout::Abi::Scalar(ref scalar) = self.abi {
            // Use a different cache for scalars because pointers to DSTs
            // can be either fat or thin (data pointers of fat pointers).
            if let Some(&llty) = cx.scalar_lltypes.borrow().get(&self.ty) {
                return llty;
            }
            let llty = match self.ty.sty {
                ty::Ref(_, ty, _) |
                ty::RawPtr(ty::TypeAndMut { ty, .. }) => {
                    cx.type_ptr_to(cx.layout_of(ty).llvm_type(cx))
                }
                ty::Adt(def, _) if def.is_box() => {
                    cx.type_ptr_to(cx.layout_of(self.ty.boxed_ty()).llvm_type(cx))
                }
                ty::FnPtr(sig) => {
                    let sig = cx.tcx.normalize_erasing_late_bound_regions(
                        ty::ParamEnv::reveal_all(),
                        &sig,
                    );
                    cx.fn_ptr_backend_type(&FnType::new(cx, sig, &[]))
                }
                _ => self.scalar_llvm_type_at(cx, scalar, Size::ZERO)
            };
            cx.scalar_lltypes.borrow_mut().insert(self.ty, llty);
            return llty;
        }

        // Check the cache.
        let variant_index = match self.variants {
            layout::Variants::Single { index } => Some(index),
            _ => None
        };
        if let Some(&llty) = cx.lltypes.borrow().get(&(self.ty, variant_index)) {
            return llty;
        }

        debug!("llvm_type({:#?})", self);

        assert!(!self.ty.has_escaping_bound_vars(), "{:?} has escaping bound vars", self.ty);

        // Make sure lifetimes are erased, to avoid generating distinct LLVM
        // types for Rust types that only differ in the choice of lifetimes.
        let normal_ty = cx.tcx.erase_regions(&self.ty);

        let mut defer = None;
        let llty = if self.ty != normal_ty {
            let mut layout = cx.layout_of(normal_ty);
            if let Some(v) = variant_index {
                layout = layout.for_variant(cx, v);
            }
            layout.llvm_type(cx)
        } else {
            uncached_llvm_type(cx, *self, &mut defer)
        };
        debug!("--> mapped {:#?} to llty={:?}", self, llty);

        cx.lltypes.borrow_mut().insert((self.ty, variant_index), llty);

        if let Some((llty, layout)) = defer {
            let (llfields, packed) = struct_llfields(cx, layout);
            cx.set_struct_body(llty, &llfields, packed)
        }

        llty
    }

    fn immediate_llvm_type<'a>(&self, cx: &CodegenCx<'a, 'tcx>) -> &'a Type {
        if let layout::Abi::Scalar(ref scalar) = self.abi {
            if scalar.is_bool() {
                return cx.type_i1();
            }
        }
        self.llvm_type(cx)
    }

    fn scalar_llvm_type_at<'a>(&self, cx: &CodegenCx<'a, 'tcx>,
                               scalar: &layout::Scalar, offset: Size) -> &'a Type {
        match scalar.value {
            layout::Int(i, _) => cx.type_from_integer(i),
            layout::Float(FloatTy::F32) => cx.type_f32(),
            layout::Float(FloatTy::F64) => cx.type_f64(),
            layout::Pointer => {
                // If we know the alignment, pick something better than i8.
                let pointee = if let Some(pointee) = self.pointee_info_at(cx, offset) {
                    cx.type_pointee_for_align(pointee.align)
                } else {
                    cx.type_i8()
                };
                cx.type_ptr_to(pointee)
            }
        }
    }

    fn scalar_pair_element_llvm_type<'a>(&self, cx: &CodegenCx<'a, 'tcx>,
                                         index: usize, immediate: bool) -> &'a Type {
        // HACK(eddyb) special-case fat pointers until LLVM removes
        // pointee types, to avoid bitcasting every `OperandRef::deref`.
        match self.ty.sty {
            ty::Ref(..) |
            ty::RawPtr(_) => {
                return self.field(cx, index).llvm_type(cx);
            }
            ty::Adt(def, _) if def.is_box() => {
                let ptr_ty = cx.tcx.mk_mut_ptr(self.ty.boxed_ty());
                return cx.layout_of(ptr_ty).scalar_pair_element_llvm_type(cx, index, immediate);
            }
            _ => {}
        }

        let (a, b) = match self.abi {
            layout::Abi::ScalarPair(ref a, ref b) => (a, b),
            _ => bug!("TyLayout::scalar_pair_element_llty({:?}): not applicable", self)
        };
        let scalar = [a, b][index];

        // Make sure to return the same type `immediate_llvm_type` would when
        // dealing with an immediate pair. This means that `(bool, bool)` is
        // effectively represented as `{i8, i8}` in memory and two `i1`s as an
        // immediate, just like `bool` is typically `i8` in memory and only `i1`
        // when immediate. We need to load/store `bool` as `i8` to avoid
        // crippling LLVM optimizations or triggering other LLVM bugs with `i1`.
        if immediate && scalar.is_bool() {
            return cx.type_i1();
        }

        let offset = if index == 0 {
            Size::ZERO
        } else {
            a.value.size(cx).align_to(b.value.align(cx).abi)
        };
        self.scalar_llvm_type_at(cx, scalar, offset)
    }

    fn llvm_field_index(&self, index: usize) -> u64 {
        match self.abi {
            layout::Abi::Scalar(_) |
            layout::Abi::ScalarPair(..) => {
                bug!("TyLayout::llvm_field_index({:?}): not applicable", self)
            }
            _ => {}
        }
        match self.fields {
            layout::FieldPlacement::Union(_) => {
                bug!("TyLayout::llvm_field_index({:?}): not applicable", self)
            }

            layout::FieldPlacement::Array { .. } => {
                index as u64
            }

            layout::FieldPlacement::Arbitrary { .. } => {
                // `struct_llfields` interleaves a padding filler before each
                // field, so field `i` (in memory order) lives at LLVM index
                // `1 + 2 * i`.
                1 + (self.fields.memory_index(index) as u64) * 2
            }
        }
    }

    fn pointee_info_at<'a>(&self, cx: &CodegenCx<'a, 'tcx>, offset: Size)
                           -> Option<PointeeInfo> {
        if let Some(&pointee) = cx.pointee_infos.borrow().get(&(self.ty, offset)) {
            return pointee;
        }

        let mut result = None;
        match self.ty.sty {
            ty::RawPtr(mt) if offset.bytes() == 0 => {
                let (size, align) = cx.size_and_align_of(mt.ty);
                result = Some(PointeeInfo {
                    size,
                    align,
                    safe: None
                });
            }

            ty::Ref(_, ty, mt) if offset.bytes() == 0 => {
                let (size, align) = cx.size_and_align_of(ty);

                let kind = match mt {
                    hir::MutImmutable => if cx.type_is_freeze(ty) {
                        PointerKind::Frozen
                    } else {
                        PointerKind::Shared
                    },
                    hir::MutMutable => {
                        // Previously we would only emit noalias annotations for LLVM >= 6 or in
                        // panic=abort mode. That was deemed right, as prior versions had many bugs
                        // in conjunction with unwinding, but later versions didn't seem to have
                        // said issues. See issue #31681.
                        //
                        // Alas, later on we encountered a case where noalias would generate wrong
                        // code altogether even with recent versions of LLVM in *safe* code with no
                        // unwinding involved. See #54462.
                        //
                        // For now, do not enable mutable_noalias by default at all, while the
                        // issue is being figured out.
                        let mutable_noalias = cx.tcx.sess.opts.debugging_opts.mutable_noalias
                            .unwrap_or(false);
                        if mutable_noalias {
                            PointerKind::UniqueBorrowed
                        } else {
                            PointerKind::Shared
                        }
                    },
                };

                result = Some(PointeeInfo {
                    size,
                    align,
                    safe: Some(kind)
                });
            }

            _ => {
                let mut data_variant = match self.variants {
                    layout::Variants::NicheFilling { dataful_variant, .. } => {
                        // Only the niche itself is always initialized,
                        // so only check for a pointer at its offset.
                        //
                        // If the niche is a pointer, it's either valid
                        // (according to its type), or null (which the
                        // niche field's scalar validity range encodes).
                        // This allows using `dereferenceable_or_null`
                        // for e.g. `Option<&T>`, and this will continue
                        // to work as long as we don't start using more
                        // niches than just null (e.g. the first page
                        // of the address space, or unaligned pointers).
                        if self.fields.offset(0) == offset {
                            Some(self.for_variant(cx, dataful_variant))
                        } else {
                            None
                        }
                    }
                    _ => Some(*self)
                };

                if let Some(variant) = data_variant {
                    // We're not interested in any unions.
                    if let layout::FieldPlacement::Union(_) = variant.fields {
                        data_variant = None;
                    }
                }

                if let Some(variant) = data_variant {
                    let ptr_end = offset + layout::Pointer.size(cx);
                    for i in 0..variant.fields.count() {
                        let field_start = variant.fields.offset(i);
                        if field_start <= offset {
                            let field = variant.field(cx, i);
                            if ptr_end <= field_start + field.size {
                                // We found the right field, look inside it.
                                result = field.pointee_info_at(cx, offset - field_start);
                                break;
                            }
                        }
                    }
                }

                // FIXME(eddyb) This should be for `ptr::Unique<T>`, not `Box<T>`.
                if let Some(ref mut pointee) = result {
                    if let ty::Adt(def, _) = self.ty.sty {
                        if def.is_box() && offset.bytes() == 0 {
                            pointee.safe = Some(PointerKind::UniqueOwned);
                        }
                    }
                }
            }
        }

        cx.pointee_infos.borrow_mut().insert((self.ty, offset), result);
        result
    }
}