// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

use abi::FnType;
use common::*;
use rustc::hir;
use rustc::ty::{self, Ty, TypeFoldable};
use rustc::ty::layout::{self, Align, LayoutOf, Size, TyLayout};
use rustc_back::PanicStrategy;
use trans_item::DefPathBasedNames;
use type_::Type;

use std::fmt::Write;

fn uncached_llvm_type<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
                                layout: TyLayout<'tcx>,
                                defer: &mut Option<(Type, TyLayout<'tcx>)>)
                                -> Type {
    match layout.abi {
        layout::Abi::Scalar(_) => bug!("handled elsewhere"),
        layout::Abi::Vector => {
            // LLVM has a separate type for 64-bit SIMD vectors on X86 called
            // `x86_mmx` which is needed for some SIMD operations. As a bit of a
            // hack (all SIMD definitions are super unstable anyway) we
            // recognize any one-element SIMD vector as "this should be an
            // x86_mmx" type. In general there shouldn't be a need for other
            // one-element SIMD vectors, so it's assumed this won't clash with
            // much else.
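            //
            // (Illustrative sketch: a hypothetical `#[repr(simd)] struct
            // U64x1(u64);` has one element and is 64 bits wide, so on
            // x86/x86_64 it would lower to `x86_mmx` rather than a generic
            // `<1 x i64>` vector.)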
            let use_x86_mmx = layout.fields.count() == 1 &&
                layout.size.bits() == 64 &&
                (ccx.sess().target.target.arch == "x86" ||
                 ccx.sess().target.target.arch == "x86_64");
            if use_x86_mmx {
                return Type::x86_mmx(ccx)
            } else {
                return Type::vector(&layout.field(ccx, 0).llvm_type(ccx),
                                    layout.fields.count() as u64);
            }
        }
        layout::Abi::ScalarPair(..) => {
            return Type::struct_(ccx, &[
                layout.scalar_pair_element_llvm_type(ccx, 0),
                layout.scalar_pair_element_llvm_type(ccx, 1),
            ], false);
        }
        layout::Abi::Uninhabited |
        layout::Abi::Aggregate { .. } => {}
    }

    let name = match layout.ty.sty {
        ty::TyClosure(..) |
        ty::TyGenerator(..) |
        ty::TyAdt(..) |
        ty::TyDynamic(..) |
        ty::TyForeign(..) |
        ty::TyStr => {
            let mut name = String::with_capacity(32);
            let printer = DefPathBasedNames::new(ccx.tcx(), true, true);
            printer.push_type_name(layout.ty, &mut name);
            match (&layout.ty.sty, &layout.variants) {
                (&ty::TyAdt(def, _), &layout::Variants::Single { index }) => {
                    if def.is_enum() && !def.variants.is_empty() {
                        write!(&mut name, "::{}", def.variants[index].name).unwrap();
                    }
                }
                _ => {}
            }
            Some(name)
        }
        _ => None
    };

    match layout.fields {
        layout::FieldPlacement::Union(_) => {
            let size = layout.size.bytes();
            let fill = Type::array(&Type::i8(ccx), size);
            match name {
                None => {
                    Type::struct_(ccx, &[fill], layout.is_packed())
                }
                Some(ref name) => {
                    let mut llty = Type::named_struct(ccx, name);
                    llty.set_struct_body(&[fill], layout.is_packed());
                    llty
                }
            }
        }
        layout::FieldPlacement::Array { count, .. } => {
            Type::array(&layout.field(ccx, 0).llvm_type(ccx), count)
        }
        layout::FieldPlacement::Arbitrary { .. } => {
            match name {
                None => {
                    Type::struct_(ccx, &struct_llfields(ccx, layout), layout.is_packed())
                }
                Some(ref name) => {
                    let llty = Type::named_struct(ccx, name);
                    *defer = Some((llty, layout));
                    llty
                }
            }
        }
    }
}

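// Illustrative sketch (assumed example, not from the original file): for
// `#[repr(C)] struct S { a: u8, b: u32 }` on a 64-bit target, the function
// below produces the body `{ [0 x i8], i8, [3 x i8], i32, [0 x i8] }` - an
// empty padding array before `a`, three padding bytes so `b` lands at
// offset 4, and an empty tail-padding array, i.e. `1 + field_count * 2`
// elements alternating padding and fields.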
fn struct_llfields<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
                             layout: TyLayout<'tcx>) -> Vec<Type> {
    debug!("struct_llfields: {:#?}", layout);
    let field_count = layout.fields.count();

    let mut offset = Size::from_bytes(0);
    let mut result: Vec<Type> = Vec::with_capacity(1 + field_count * 2);
    for i in layout.fields.index_by_increasing_offset() {
        let field = layout.field(ccx, i);
        let target_offset = layout.fields.offset(i as usize);
        debug!("struct_llfields: {}: {:?} offset: {:?} target_offset: {:?}",
               i, field, offset, target_offset);
        assert!(target_offset >= offset);
        let padding = target_offset - offset;
        result.push(Type::array(&Type::i8(ccx), padding.bytes()));
        debug!("    padding before: {:?}", padding);

        result.push(field.llvm_type(ccx));

        if layout.is_packed() {
            assert_eq!(padding.bytes(), 0);
        } else {
            assert!(field.align.abi() <= layout.align.abi(),
                    "non-packed type has field with larger align ({}): {:#?}",
                    field.align.abi(), layout);
        }

        offset = target_offset + field.size;
    }
    if !layout.is_unsized() && field_count > 0 {
        if offset > layout.size {
            bug!("layout: {:#?} stride: {:?} offset: {:?}",
                 layout, layout.size, offset);
        }
        let padding = layout.size - offset;
        debug!("struct_llfields: pad_bytes: {:?} offset: {:?} stride: {:?}",
               padding, offset, layout.size);
        result.push(Type::array(&Type::i8(ccx), padding.bytes()));
        assert!(result.len() == 1 + field_count * 2);
    } else {
        debug!("struct_llfields: offset: {:?} stride: {:?}",
               offset, layout.size);
    }

    result
}

impl<'a, 'tcx> CrateContext<'a, 'tcx> {
    pub fn align_of(&self, ty: Ty<'tcx>) -> Align {
        self.layout_of(ty).align
    }

    pub fn size_of(&self, ty: Ty<'tcx>) -> Size {
        self.layout_of(ty).size
    }

    pub fn size_and_align_of(&self, ty: Ty<'tcx>) -> (Size, Align) {
        self.layout_of(ty).size_and_align()
    }
}

#[derive(Copy, Clone, PartialEq, Eq)]
pub enum PointerKind {
    /// The most general case: no restrictions we can tell LLVM about.
    Shared,

    /// `&T` where `T` contains no `UnsafeCell`, is `noalias` and `readonly`.
    Frozen,

    /// `&mut T`, when we know `noalias` is safe for LLVM.
    UniqueBorrowed,

    /// `Box<T>`; unlike `UniqueBorrowed`, it also has `noalias` on returns.
    UniqueOwned
}

#[derive(Copy, Clone)]
pub struct PointeeInfo {
    pub size: Size,
    pub align: Align,
    pub safe: Option<PointerKind>,
}

pub trait LayoutLlvmExt<'tcx> {
    fn is_llvm_immediate(&self) -> bool;
    fn is_llvm_scalar_pair<'a>(&self) -> bool;
    fn llvm_type<'a>(&self, ccx: &CrateContext<'a, 'tcx>) -> Type;
    fn immediate_llvm_type<'a>(&self, ccx: &CrateContext<'a, 'tcx>) -> Type;
    fn scalar_pair_element_llvm_type<'a>(&self, ccx: &CrateContext<'a, 'tcx>,
                                         index: usize) -> Type;
    fn llvm_field_index(&self, index: usize) -> u64;
    fn pointee_info_at<'a>(&self, ccx: &CrateContext<'a, 'tcx>, offset: Size)
                           -> Option<PointeeInfo>;
}

impl<'tcx> LayoutLlvmExt<'tcx> for TyLayout<'tcx> {
    fn is_llvm_immediate(&self) -> bool {
        match self.abi {
            layout::Abi::Uninhabited |
            layout::Abi::Scalar(_) |
            layout::Abi::Vector => true,
            layout::Abi::ScalarPair(..) => false,
            layout::Abi::Aggregate { .. } => self.is_zst()
        }
    }

    fn is_llvm_scalar_pair<'a>(&self) -> bool {
        match self.abi {
            layout::Abi::ScalarPair(..) => true,
            layout::Abi::Uninhabited |
            layout::Abi::Scalar(_) |
            layout::Abi::Vector |
            layout::Abi::Aggregate { .. } => false
        }
    }

    /// Get the LLVM type corresponding to a Rust type, i.e. `rustc::ty::Ty`.
    /// The pointee type of the pointer in `PlaceRef` is always this type.
    /// For sized types, it is also the right LLVM type for an `alloca`
    /// containing a value of that type, and most immediates (except `bool`).
    /// Unsized types, however, are represented by a "minimal unit", e.g.
    /// `[T]` becomes `T`, while `str` and `Trait` turn into `i8` - this
    /// is useful for indexing slices, as `&[T]`'s data pointer is `T*`.
    /// If the type is an unsized struct, the regular layout is generated,
    /// with the inner-most trailing unsized field using the "minimal unit"
    /// of that field's type - this is useful for taking the address of
    /// that field and ensuring the struct has the right alignment.
    fn llvm_type<'a>(&self, ccx: &CrateContext<'a, 'tcx>) -> Type {
        if let layout::Abi::Scalar(ref scalar) = self.abi {
            // Use a different cache for scalars because pointers to DSTs
            // can be either fat or thin (data pointers of fat pointers).
            if let Some(&llty) = ccx.scalar_lltypes().borrow().get(&self.ty) {
                return llty;
            }
            let llty = match scalar.value {
                layout::Int(i, _) => Type::from_integer(ccx, i),
                layout::F32 => Type::f32(ccx),
                layout::F64 => Type::f64(ccx),
                layout::Pointer => {
                    let pointee = match self.ty.sty {
                        ty::TyRef(_, ty::TypeAndMut { ty, .. }) |
                        ty::TyRawPtr(ty::TypeAndMut { ty, .. }) => {
                            ccx.layout_of(ty).llvm_type(ccx)
                        }
                        ty::TyAdt(def, _) if def.is_box() => {
                            ccx.layout_of(self.ty.boxed_ty()).llvm_type(ccx)
                        }
                        ty::TyFnPtr(sig) => {
                            let sig = ccx.tcx().erase_late_bound_regions_and_normalize(&sig);
                            FnType::new(ccx, sig, &[]).llvm_type(ccx)
                        }
                        _ => {
                            // If we know the alignment, pick something better than i8.
                            if let Some(pointee) = self.pointee_info_at(ccx, Size::from_bytes(0)) {
                                Type::pointee_for_abi_align(ccx, pointee.align)
                            } else {
                                Type::i8(ccx)
                            }
                        }
                    };
                    pointee.ptr_to()
                }
            };
            ccx.scalar_lltypes().borrow_mut().insert(self.ty, llty);
            return llty;
        }

        let variant_index = match self.variants {
            layout::Variants::Single { index } => Some(index),
            _ => None
        };
        if let Some(&llty) = ccx.lltypes().borrow().get(&(self.ty, variant_index)) {
            return llty;
        }

        debug!("llvm_type({:#?})", self);

        assert!(!self.ty.has_escaping_regions(), "{:?} has escaping regions", self.ty);

        // Make sure lifetimes are erased, to avoid generating distinct LLVM
        // types for Rust types that only differ in the choice of lifetimes.
        let normal_ty = ccx.tcx().erase_regions(&self.ty);

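        // (Illustrative: `&'a u32` and `&'static u32` both erase to `&u32`
        // here, so they end up sharing a single LLVM type.)
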
        let mut defer = None;
        let llty = if self.ty != normal_ty {
            let mut layout = ccx.layout_of(normal_ty);
            if let Some(v) = variant_index {
                layout = layout.for_variant(ccx, v);
            }
            layout.llvm_type(ccx)
        } else {
            uncached_llvm_type(ccx, *self, &mut defer)
        };
        debug!("--> mapped {:#?} to llty={:?}", self, llty);

        ccx.lltypes().borrow_mut().insert((self.ty, variant_index), llty);

        if let Some((mut llty, layout)) = defer {
            llty.set_struct_body(&struct_llfields(ccx, layout), layout.is_packed())
        }

        llty
    }

    fn immediate_llvm_type<'a>(&self, ccx: &CrateContext<'a, 'tcx>) -> Type {
        if let layout::Abi::Scalar(ref scalar) = self.abi {
            if scalar.is_bool() {
                return Type::i1(ccx);
            }
        }
        self.llvm_type(ccx)
    }

    fn scalar_pair_element_llvm_type<'a>(&self, ccx: &CrateContext<'a, 'tcx>,
                                         index: usize) -> Type {
        // HACK(eddyb) special-case fat pointers until LLVM removes
        // pointee types, to avoid bitcasting every `OperandRef::deref`.
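        // (Illustrative: for `&[u32]`, element 0 stays a data pointer typed
        // `i32*` rather than an opaque `i8*`, and element 1 is the `usize`
        // length.)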
        match self.ty.sty {
            ty::TyRef(..) |
            ty::TyRawPtr(_) => {
                return self.field(ccx, index).llvm_type(ccx);
            }
            ty::TyAdt(def, _) if def.is_box() => {
                let ptr_ty = ccx.tcx().mk_mut_ptr(self.ty.boxed_ty());
                return ccx.layout_of(ptr_ty).scalar_pair_element_llvm_type(ccx, index);
            }
            _ => {}
        }

        let (a, b) = match self.abi {
            layout::Abi::ScalarPair(ref a, ref b) => (a, b),
            _ => bug!("TyLayout::scalar_pair_element_llty({:?}): not applicable", self)
        };
        let scalar = [a, b][index];

        // Make sure to return the same type `immediate_llvm_type` would,
        // to avoid dealing with two types and the associated conversions.
        // This means that `(bool, bool)` is represented as `{i1, i1}`,
        // both in memory and as an immediate, while `bool` is typically
        // `i8` in memory and only `i1` when immediate. While we need to
        // load/store `bool` as `i8` to avoid crippling LLVM optimizations,
        // `i1` in an LLVM aggregate is valid and mostly equivalent to `i8`.
        if scalar.is_bool() {
            return Type::i1(ccx);
        }

        match scalar.value {
            layout::Int(i, _) => Type::from_integer(ccx, i),
            layout::F32 => Type::f32(ccx),
            layout::F64 => Type::f64(ccx),
            layout::Pointer => {
                // If we know the alignment, pick something better than i8.
                let offset = if index == 0 {
                    Size::from_bytes(0)
                } else {
                    a.value.size(ccx).abi_align(b.value.align(ccx))
                };
                let pointee = if let Some(pointee) = self.pointee_info_at(ccx, offset) {
                    Type::pointee_for_abi_align(ccx, pointee.align)
                } else {
                    Type::i8(ccx)
                };
                pointee.ptr_to()
            }
        }
    }

    fn llvm_field_index(&self, index: usize) -> u64 {
        match self.abi {
            layout::Abi::Scalar(_) |
            layout::Abi::ScalarPair(..) => {
                bug!("TyLayout::llvm_field_index({:?}): not applicable", self)
            }
            _ => {}
        }
        match self.fields {
            layout::FieldPlacement::Union(_) => {
                bug!("TyLayout::llvm_field_index({:?}): not applicable", self)
            }

            layout::FieldPlacement::Array { .. } => {
                index as u64
            }

            layout::FieldPlacement::Arbitrary { .. } => {
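                // Worked example (illustrative, matching the sketch above
                // `struct_llfields`): `#[repr(C)] struct S { a: u8, b: u32 }`
                // lowers to `{ [0 x i8], i8, [3 x i8], i32, [0 x i8] }`, so
                // field `b` (memory index 1) is LLVM element 1 + 1 * 2 = 3.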
                1 + (self.fields.memory_index(index) as u64) * 2
            }
        }
    }

    fn pointee_info_at<'a>(&self, ccx: &CrateContext<'a, 'tcx>, offset: Size)
                           -> Option<PointeeInfo> {
        if let Some(&pointee) = ccx.pointee_infos().borrow().get(&(self.ty, offset)) {
            return pointee;
        }

        let mut result = None;
        match self.ty.sty {
            ty::TyRawPtr(mt) if offset.bytes() == 0 => {
                let (size, align) = ccx.size_and_align_of(mt.ty);
                result = Some(PointeeInfo {
                    size,
                    align,
                    safe: None
                });
            }

            ty::TyRef(_, mt) if offset.bytes() == 0 => {
                let (size, align) = ccx.size_and_align_of(mt.ty);

                let kind = match mt.mutbl {
                    hir::MutImmutable => if ccx.shared().type_is_freeze(mt.ty) {
                        PointerKind::Frozen
                    } else {
                        PointerKind::Shared
                    },
                    hir::MutMutable => {
                        if ccx.shared().tcx().sess.opts.debugging_opts.mutable_noalias ||
                           ccx.shared().tcx().sess.panic_strategy() == PanicStrategy::Abort {
                            PointerKind::UniqueBorrowed
                        } else {
                            PointerKind::Shared
                        }
                    }
                };

                result = Some(PointeeInfo {
                    size,
                    align,
                    safe: Some(kind)
                });
            }

            _ => {
                let mut data_variant = match self.variants {
                    layout::Variants::NicheFilling { dataful_variant, .. } => {
                        // Only the niche itself is always initialized,
                        // so only check for a pointer at its offset.
                        //
                        // If the niche is a pointer, it's either valid
                        // (according to its type), or null (which the
                        // niche field's scalar validity range encodes).
                        // This allows using `dereferenceable_or_null`
                        // for e.g. `Option<&T>`, and this will continue
                        // to work as long as we don't start using more
                        // niches than just null (e.g. the first page
                        // of the address space, or unaligned pointers).
                        if self.fields.offset(0) == offset {
                            Some(self.for_variant(ccx, dataful_variant))
                        } else {
                            None
                        }
                    }
                    _ => Some(*self)
                };

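                // (Illustrative: for `Option<&u32>`, the niche is the pointer
                // itself at offset 0, so a query at offset 0 inspects the
                // dataful `Some` variant, ultimately letting callers emit
                // `dereferenceable_or_null` on loads of the `Option`.)
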
                if let Some(variant) = data_variant {
                    // We're not interested in any unions.
                    if let layout::FieldPlacement::Union(_) = variant.fields {
                        data_variant = None;
                    }
                }

                if let Some(variant) = data_variant {
                    let ptr_end = offset + layout::Pointer.size(ccx);
                    for i in 0..variant.fields.count() {
                        let field_start = variant.fields.offset(i);
                        if field_start <= offset {
                            let field = variant.field(ccx, i);
                            if ptr_end <= field_start + field.size {
                                // We found the right field, look inside it.
                                result = field.pointee_info_at(ccx, offset - field_start);
                                break;
                            }
                        }
                    }
                }

                // FIXME(eddyb) This should be for `ptr::Unique<T>`, not `Box<T>`.
                if let Some(ref mut pointee) = result {
                    if let ty::TyAdt(def, _) = self.ty.sty {
                        if def.is_box() && offset.bytes() == 0 {
                            pointee.safe = Some(PointerKind::UniqueOwned);
                        }
                    }
                }
            }
        }

        ccx.pointee_infos().borrow_mut().insert((self.ty, offset), result);
        result
    }
}