1 // Copyright 2013 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
11 //! # Representation of Algebraic Data Types
13 //! This module determines how to represent enums, structs, and tuples
14 //! based on their monomorphized types; it is responsible both for
15 //! choosing a representation and translating basic operations on
16 //! values of those types. (Note: exporting the representations for
17 //! debuggers is handled in debuginfo.rs, not here.)
19 //! Note that the interface treats everything as a general case of an
20 //! enum, so structs/tuples/etc. have one pseudo-variant with
21 //! discriminant 0; i.e., as if they were a univariant enum.
23 //! Having everything in one place will enable improvements to data
24 //! structure representation; possibilities include:
26 //! - User-specified alignment (e.g., cacheline-aligning parts of
27 //! concurrently accessed data structures); LLVM can't represent this
28 //! directly, so we'd have to insert padding fields in any structure
29 //! that might contain one and adjust GEP indices accordingly. See
32 //! - Store nested enums' discriminants in the same word. Rather, if
33 //! some variants start with enums, and those enums representations
34 //! have unused alignment padding between discriminant and body, the
35 //! outer enum's discriminant can be stored there and those variants
36 //! can start at offset 0. Kind of fancy, and might need work to
37 //! make copies of the inner enum type cooperate, but it could help
38 //! with `Option` or `Result` wrapped around another enum.
40 //! - Tagged pointers would be neat, but given that any type can be
41 //! used unboxed and any field can have pointers (including mutable)
42 //! taken to it, implementing them for Rust seems difficult.
46 use llvm::{ValueRef, True, IntEQ, IntNE};
47 use rustc::ty::{self, Ty};
48 use rustc::ty::layout::{self, LayoutTyper};
57 use mir::lvalue::Alignment;
59 /// Given an enum, struct, closure, or tuple, extracts fields.
60 /// Treats closures as a struct with one variant.
61 /// `empty_if_no_variants` is a switch to deal with empty enums.
62 /// If true, `variant_index` is disregarded and an empty Vec returned in this case.
// NOTE(review): this excerpt is missing interior lines (the `variant_index`
// parameter line, the `match t.sty` header, several arm bodies and closing
// braces) — the comments below annotate only what is visible.
63 pub fn compute_fields<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>,
65 empty_if_no_variants: bool) -> Vec<Ty<'tcx>> {
// Variant-less enum, guarded by the `empty_if_no_variants` switch;
// presumably returns an empty Vec (arm body not visible here) — TODO confirm.
67 ty::TyAdt(ref def, _) if def.variants.len() == 0 && empty_if_no_variants => {
// Ordinary ADT: monomorphize each declared field type of the selected variant.
70 ty::TyAdt(ref def, ref substs) => {
71 def.variants[variant_index].fields.iter().map(|f| {
72 monomorphize::field_ty(cx.tcx(), substs, f)
73 }).collect::<Vec<_>>()
// Tuples carry their element types directly.
75 ty::TyTuple(fields, _) => fields.to_vec(),
// Closures: the single pseudo-variant's fields are the captured upvars.
76 ty::TyClosure(def_id, substs) => {
77 if variant_index > 0 { bug!("{} is a closure, which only has one variant", t);}
78 substs.upvar_tys(def_id, cx.tcx()).collect()
// Anything else (primitives, references, ...) has no fields.
80 _ => bug!("{} is not a type that can have fields.", t)
84 /// LLVM-level types are a little complicated.
86 /// C-like enums need to be actual ints, not wrapped in a struct,
87 /// because that changes the ABI on some platforms (see issue #10308).
89 /// For nominal types, in some cases, we need to use LLVM named structs
90 /// and fill in the actual contents in a second pass to prevent
91 /// unbounded recursion; see also the comments in `trans::type_of`.
// Builds the complete (unnamed) LLVM type for `t` in a single pass by
// delegating to `generic_type_of` with no struct name.
// NOTE(review): the function's closing brace is not visible in this excerpt.
92 pub fn type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>) -> Type {
93 generic_type_of(cx, t, None)
// First half of the two-pass construction for recursive nominal types:
// creates an LLVM *named* struct for `t` whose body is filled in later by
// `finish_type_of` (delegates to `generic_type_of` with `Some(name)`).
// NOTE(review): the function's closing brace is not visible in this excerpt.
96 pub fn incomplete_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
97 t: Ty<'tcx>, name: &str) -> Type {
98 generic_type_of(cx, t, Some(name))
// Second half of the two-pass construction: fills in the body of an LLVM
// named struct previously created by `incomplete_type_of`.
// NOTE(review): this excerpt is missing interior lines (the `match *l`
// header, the trailing arguments of `set_struct_body`, and closing braces)
// — comments annotate only the visible code.
101 pub fn finish_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
102 t: Ty<'tcx>, llty: &mut Type) {
103 let l = cx.layout_of(t);
104 debug!("finish_type_of: {} with layout {:#?}", t, l);
// These layouts are already built completely by `generic_type_of`,
// so there is nothing left to fill in.
106 layout::CEnum { .. } | layout::General { .. }
107 | layout::UntaggedUnion { .. } | layout::RawNullablePointer { .. } => { }
// Struct-like layouts: pick the (non-null) variant whose fields form
// the struct body, then set it on the named struct.
108 layout::Univariant { ..}
109 | layout::StructWrappedNullablePointer { .. } => {
110 let (nonnull_variant_index, nonnull_variant, packed) = match *l {
111 layout::Univariant { ref variant, .. } => (0, variant, variant.packed),
112 layout::StructWrappedNullablePointer { nndiscr, ref nonnull, .. } =>
113 (nndiscr, nonnull, nonnull.packed),
// `true`: an empty enum yields an empty field list rather than a bug.
116 let fields = compute_fields(cx, t, nonnull_variant_index as usize, true);
117 llty.set_struct_body(&struct_llfields(cx, &fields, nonnull_variant),
120 _ => bug!("This function cannot handle {} with layout {:#?}", t, l)
// Shared worker for `type_of` / `incomplete_type_of`: maps a monomorphized
// type's layout to an LLVM type. `name == None` builds the complete type;
// `name == Some(..)` builds (or defers filling of) a named struct.
// NOTE(review): this excerpt is missing interior lines throughout (the `t`
// parameter line, `match *l` header, several `match name`/`if let` arms,
// and closing braces) — comments annotate only the visible code.
124 fn generic_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
126 name: Option<&str>) -> Type {
127 let l = cx.layout_of(t);
128 debug!("adt::generic_type_of t: {:?} name: {:?}", t, name);
// C-like enum: a bare integer of the discriminant's width (no wrapper
// struct — see the ABI note on `type_of` above).
130 layout::CEnum { discr, .. } => Type::from_integer(cx, discr),
// Nullable-pointer optimization, scalar case: the enum is represented
// as the non-null variant's single field itself.
131 layout::RawNullablePointer { nndiscr, .. } => {
132 let (def, substs) = match t.sty {
133 ty::TyAdt(d, s) => (d, s),
134 _ => bug!("{} is not an ADT", t)
136 let nnty = monomorphize::field_ty(cx.tcx(), substs,
137 &def.variants[nndiscr as usize].fields[0]);
138 if let layout::Scalar { value: layout::Pointer, .. } = *cx.layout_of(nnty) {
141 type_of::type_of(cx, nnty)
// Nullable-pointer optimization, struct case: represented as the
// non-null variant's struct, with padding made explicit.
144 layout::StructWrappedNullablePointer { nndiscr, ref nonnull, .. } => {
145 let fields = compute_fields(cx, t, nndiscr as usize, false);
148 Type::struct_(cx, &struct_llfields(cx, &fields, nonnull),
152 Type::named_struct(cx, name)
156 layout::Univariant { ref variant, .. } => {
157 // Note that this case also handles empty enums.
158 // Thus the true as the final parameter here.
159 let fields = compute_fields(cx, t, 0, true);
162 let fields = struct_llfields(cx, &fields, &variant);
163 Type::struct_(cx, &fields, variant.packed)
166 // Hypothesis: named_struct's can never need a
167 // drop flag. (... needs validation.)
168 Type::named_struct(cx, name)
// Untagged union: no per-field LLVM layout; a single opaque "fill"
// member covers the whole storage at the union's alignment.
172 layout::UntaggedUnion { ref variants, .. }=> {
173 // Use alignment-sized ints to fill all the union storage.
174 let size = variants.stride().bytes();
175 let align = variants.align.abi();
176 let fill = union_fill(cx, size, align);
179 Type::struct_(cx, &[fill], variants.packed)
182 let mut llty = Type::named_struct(cx, name);
183 llty.set_struct_body(&[fill], variants.packed);
// General tagged enum: discriminant first, padded up to the enum's
// alignment with more discriminant-typed elements, then an opaque
// union-fill for the variant data.
188 layout::General { discr, size, align, primitive_align, .. } => {
189 // We need a representation that has:
190 // * The alignment of the most-aligned field
191 // * The size of the largest variant (rounded up to that alignment)
192 // * No alignment padding anywhere any variant has actual data
193 // (currently matters only for enums small enough to be immediate)
194 // * The discriminant in an obvious place.
196 // So we start with the discriminant, pad it up to the alignment with
197 // more of its own type, then use alignment-sized ints to get the rest
199 let size = size.bytes();
200 let align = align.abi();
201 let primitive_align = primitive_align.abi();
// `align` is later narrowed to u32 for `roundup`; guard the cast.
202 assert!(align <= std::u32::MAX as u64);
203 let discr_ty = Type::from_integer(cx, discr);
204 let discr_size = discr.size().bytes();
205 let padded_discr_size = roundup(discr_size, align as u32);
206 let variant_part_size = size-padded_discr_size;
207 let variant_fill = union_fill(cx, variant_part_size, primitive_align);
209 assert_eq!(machine::llalign_of_min(cx, variant_fill), primitive_align as u32);
210 assert_eq!(padded_discr_size % discr_size, 0); // Ensure discr_ty can fill pad evenly
211 let fields: Vec<Type> =
213 Type::array(&discr_ty, (padded_discr_size - discr_size)/discr_size),
214 variant_fill].iter().cloned().collect();
217 Type::struct_(cx, &fields, false)
220 let mut llty = Type::named_struct(cx, name);
221 llty.set_struct_body(&fields, false);
226 _ => bug!("Unsupported type {} represented as {:#?}", t, l)
230 fn union_fill(cx: &CrateContext, size: u64, align: u64) -> Type {
231 assert_eq!(size%align, 0);
232 assert_eq!(align.count_ones(), 1, "Alignment must be a power fof 2. Got {}", align);
233 let align_units = size/align;
234 let layout_align = layout::Align::from_bytes(align, align).unwrap();
235 if let Some(ity) = layout::Integer::for_abi_align(cx, layout_align) {
236 Type::array(&Type::from_integer(cx, ity), align_units)
238 Type::array(&Type::vector(&Type::i32(cx), align/4),
244 // Double index to account for padding (FieldPath already uses `Struct::memory_index`)
245 fn struct_llfields_path(discrfield: &layout::FieldPath) -> Vec<usize> {
246 discrfield.iter().map(|&i| (i as usize) << 1).collect::<Vec<_>>()
250 // Lookup `Struct::memory_index` and double it to account for padding
251 pub fn struct_llfields_index(variant: &layout::Struct, index: usize) -> usize {
252 (variant.memory_index[index] as usize) << 1
// Builds the LLVM field list for a struct layout, walking fields in
// increasing-offset order and inserting an explicit `[i8 x N]` padding
// entry before every field — so the result holds exactly two entries per
// logical field (see the `field_tys.len() * 2` assertion below), which is
// what `struct_llfields_index`/`struct_llfields_path` rely on.
// NOTE(review): this excerpt is missing interior lines (the push of the
// field's own LLVM type, some debug! arguments, `else` branches, and
// closing braces) — comments annotate only the visible code.
256 pub fn struct_llfields<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, field_tys: &Vec<Ty<'tcx>>,
257 variant: &layout::Struct) -> Vec<Type> {
258 debug!("struct_llfields: variant: {:?}", variant);
259 let mut first_field = true;
// Running offset: end of the previous field, i.e. where padding starts.
260 let mut min_offset = 0;
261 let mut result: Vec<Type> = Vec::with_capacity(field_tys.len() * 2);
// Iterate fields by memory order, pairing each with its byte offset.
262 let field_iter = variant.field_index_by_increasing_offset().map(|i| {
263 (i, field_tys[i as usize], variant.offsets[i as usize].bytes()) });
264 for (index, ty, target_offset) in field_iter {
266 debug!("struct_llfields: {} ty: {} min_offset: {} target_offset: {}",
267 index, ty, min_offset, target_offset);
// Offsets must be non-decreasing; packed structs get zero padding.
270 assert!(target_offset >= min_offset);
271 let padding_bytes = if variant.packed { 0 } else { target_offset - min_offset };
272 result.push(Type::array(&Type::i8(cx), padding_bytes));
273 debug!("struct_llfields: {} ty: {} pad_bytes: {} min_offset: {} target_offset: {}",
274 index, ty, padding_bytes, min_offset, target_offset);
276 let llty = type_of::in_memory_type_of(cx, ty);
278 let layout = cx.layout_of(ty);
279 let target_size = layout.size(&cx.tcx().data_layout).bytes();
280 min_offset = target_offset + target_size;
// Trailing padding out to the struct's stride (sized structs only).
282 if variant.sized && !field_tys.is_empty() {
283 if variant.stride().bytes() < min_offset {
284 bug!("variant: {:?} stride: {} min_offset: {}", variant, variant.stride().bytes(),
287 let padding_bytes = variant.stride().bytes() - min_offset;
288 debug!("struct_llfields: pad_bytes: {} min_offset: {} min_size: {} stride: {}\n",
289 padding_bytes, min_offset, variant.min_size.bytes(), variant.stride().bytes());
290 result.push(Type::array(&Type::i8(cx), padding_bytes));
// Invariant: one padding entry + one field entry per logical field.
291 assert!(result.len() == (field_tys.len() * 2));
293 debug!("struct_llfields: min_offset: {} min_size: {} stride: {}\n",
294 min_offset, variant.min_size.bytes(), variant.stride().bytes());
// Whether the layout's discriminant is a signed integer; only C-like enums
// carry a signedness flag.
// NOTE(review): the `match` header and the remaining arm(s) are missing
// from this excerpt — presumably every other layout yields `false`; confirm
// against the full source.
300 pub fn is_discr_signed<'tcx>(l: &layout::Layout) -> bool {
302 layout::CEnum { signed, .. }=> signed,
307 /// Obtain the actual discriminant of a value.
// Emits the code that reads a value's discriminant, per layout:
// loaded integer (CEnum/General), constant 0 (univariant/union), or a
// null-pointer comparison (the two nullable-pointer optimizations).
// The result is optionally `intcast` to `cast_to` at the end.
// NOTE(review): this excerpt is missing interior lines (the `t`,
// `scrutinee`, and `range_assert` parameters, the `match *l` header, the
// `range_assert` argument of the General arm, the `cast_to` match header,
// and closing braces) — comments annotate only the visible code.
308 pub fn trans_get_discr<'a, 'tcx>(
309 bcx: &Builder<'a, 'tcx>,
312 alignment: Alignment,
313 cast_to: Option<Type>,
316 debug!("trans_get_discr t: {:?}", t);
317 let l = bcx.ccx.layout_of(t);
// C-like enum: the value *is* the discriminant; load it directly.
320 layout::CEnum { discr, min, max, .. } => {
321 load_discr(bcx, discr, scrutinee, alignment, min, max, range_assert)
// General enum: discriminant lives in field 0 of the struct.
323 layout::General { discr, ref variants, .. } => {
324 let ptr = bcx.struct_gep(scrutinee, 0);
325 load_discr(bcx, discr, ptr, alignment,
326 0, variants.len() as u64 - 1,
// One (pseudo-)variant only: the discriminant is trivially 0.
329 layout::Univariant { .. } | layout::UntaggedUnion { .. } => C_u8(bcx.ccx, 0),
// Nullable-pointer optimization: discriminant is encoded in whether
// the pointer is null; compare against null accordingly.
330 layout::RawNullablePointer { nndiscr, .. } => {
331 let cmp = if nndiscr == 0 { IntEQ } else { IntNE };
332 let discr = bcx.load(scrutinee, alignment.to_align());
333 bcx.icmp(cmp, discr, C_null(val_ty(discr)))
335 layout::StructWrappedNullablePointer { nndiscr, ref discrfield, .. } => {
336 struct_wrapped_nullable_bitdiscr(bcx, nndiscr, discrfield, scrutinee, alignment)
338 _ => bug!("{} is not an enum", t)
// Optional widening/narrowing of the loaded discriminant.
342 Some(llty) => bcx.intcast(val, llty, is_discr_signed(&l))
// Reads the discriminant for the struct-wrapped nullable-pointer layout:
// GEPs down `discrfield` (doubled for padding entries) to the pointer that
// encodes the discriminant, loads it, and compares it against null.
// NOTE(review): this excerpt is missing the `bcx`/`scrutinee` parameter
// lines, the return type line, and closing braces.
346 fn struct_wrapped_nullable_bitdiscr(
349 discrfield: &layout::FieldPath,
351 alignment: Alignment,
353 let path = struct_llfields_path(discrfield);
354 let llptrptr = bcx.gepi(scrutinee, &path);
355 let llptr = bcx.load(llptrptr, alignment.to_align());
// Null means "the other variant": equality direction depends on which
// variant is the non-null one.
356 let cmp = if nndiscr == 0 { IntEQ } else { IntNE };
357 bcx.icmp(cmp, llptr, C_null(val_ty(llptr)))
360 /// Helper for cases where the discriminant is simply loaded.
// Loads the integer discriminant at `ptr`; when `range_assert` is on and
// the [min, max] range does not cover the whole integer type, emits a load
// with LLVM range metadata so the optimizer can assume the bounds.
// NOTE(review): this excerpt is missing the `range_assert` parameter line,
// the `else` line between the two branches, and closing braces.
361 fn load_discr(bcx: &Builder, ity: layout::Integer, ptr: ValueRef,
362 alignment: Alignment, min: u64, max: u64,
365 let llty = Type::from_integer(bcx.ccx, ity);
366 assert_eq!(val_ty(ptr), llty.ptr_to());
367 let bits = ity.size().bits();
369 let bits = bits as usize;
// Mask selecting only the bits that fit in the discriminant type.
370 let mask = !0u64 >> (64 - bits);
371 // For a (max) discr of -1, max will be `-1 as usize`, which overflows.
372 // However, that is fine here (it would still represent the full range),
373 if max.wrapping_add(1) & mask == min & mask || !range_assert {
374 // i.e., if the range is everything. The lo==hi case would be
375 // rejected by the LLVM verifier (it would mean either an
376 // empty set, which is impossible, or the entire range of the
377 // type, which is pointless).
378 bcx.load(ptr, alignment.to_align())
380 // llvm::ConstantRange can deal with ranges that wrap around,
381 // so an overflow on (max + 1) is fine.
382 bcx.load_range_assert(ptr, min, max.wrapping_add(1), /* signed: */ True,
383 alignment.to_align())
387 /// Set the discriminant for a new value of the given case of the given
// Stores the discriminant `to` into `val`, per layout: direct store
// (CEnum), store into field 0 (General), no-op (single-variant layouts),
// or writing null for the nullable-pointer optimizations.
// NOTE(review): this excerpt is missing interior lines (the rest of the
// doc comment, the `match *l` header, the store-destination argument of
// the CEnum arm, `if nndiscr != to` guards, `else` lines, and closing
// braces) — comments annotate only the visible code.
389 pub fn trans_set_discr<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, t: Ty<'tcx>, val: ValueRef, to: u64) {
390 let l = bcx.ccx.layout_of(t);
392 layout::CEnum{ discr, min, max, .. } => {
// Sanity-check the requested discriminant against the layout's range.
393 assert_discr_in_range(min, max, to);
394 bcx.store(C_integral(Type::from_integer(bcx.ccx, discr), to, true),
397 layout::General{ discr, .. } => {
398 bcx.store(C_integral(Type::from_integer(bcx.ccx, discr), to, true),
399 bcx.struct_gep(val, 0), None);
// No discriminant stored for single-variant representations.
401 layout::Univariant { .. }
402 | layout::UntaggedUnion { .. }
403 | layout::Vector { .. } => {
// Nullable-pointer optimization: selecting the "null" variant is done
// by writing null over the pointer itself.
406 layout::RawNullablePointer { nndiscr, .. } => {
408 let llptrty = val_ty(val).element_type();
409 bcx.store(C_null(llptrty), val, None);
412 layout::StructWrappedNullablePointer { nndiscr, ref discrfield, ref nonnull, .. } => {
414 if target_sets_discr_via_memset(bcx) {
415 // Issue #34427: As workaround for LLVM bug on
416 // ARM, use memset of 0 on whole struct rather
417 // than storing null to single target field.
418 let llptr = bcx.pointercast(val, Type::i8(bcx.ccx).ptr_to());
419 let fill_byte = C_u8(bcx.ccx, 0);
420 let size = C_uint(bcx.ccx, nonnull.stride().bytes());
421 let align = C_i32(bcx.ccx, nonnull.align.abi() as i32);
422 base::call_memset(bcx, llptr, fill_byte, size, align, false);
// Non-ARM path: null out just the discriminant-carrying pointer field.
424 let path = struct_llfields_path(discrfield);
425 let llptrptr = bcx.gepi(val, &path);
426 let llptrty = val_ty(llptrptr).element_type();
427 bcx.store(C_null(llptrty), llptrptr, None);
431 _ => bug!("Cannot handle {} represented as {:#?}", t, l)
435 fn target_sets_discr_via_memset<'a, 'tcx>(bcx: &Builder<'a, 'tcx>) -> bool {
436 bcx.sess().target.target.arch == "arm" || bcx.sess().target.target.arch == "aarch64"
/// Asserts that `discr` lies within the discriminant range `min..=max`.
///
/// When `min > max` the range wraps around the end of the integer type
/// (this happens when signed discriminants are stored as unsigned bit
/// patterns), so membership means `discr >= min` OR `discr <= max`.
///
/// NOTE(review): the excerpt showed the two assertions without the
/// `if min <= max { .. } else { .. }` branch that selects between them;
/// executing both unconditionally would reject valid wrapped ranges, so
/// the branch is restored here.
pub fn assert_discr_in_range<D: PartialOrd>(min: D, max: D, discr: D) {
    if min <= max {
        // Ordinary (non-wrapping) range.
        assert!(min <= discr && discr <= max)
    } else {
        // Wrapped range: the valid values are [min, TYPE_MAX] ∪ [TYPE_MIN, max].
        assert!(min <= discr || discr <= max)
    }
}
// FIXME this utility routine should be somewhere more general
/// Rounds `x` up to the next multiple of `a`. Callers pass an alignment,
/// so `a` is non-zero; `roundup(0, a)` is 0.
fn roundup(x: u64, a: u32) -> u64 {
    let align = u64::from(a);
    ((x + align - 1) / align) * align
}
451 /// Extract a field of a constant value, as appropriate for its
454 /// (Not to be confused with `common::const_get_elt`, which operates on
455 /// raw LLVM-level structs and arrays.)
// Maps the logical field index `ix` to the right element of the constant:
// through `memory_index` for univariant structs, directly for vectors, and
// always element 0 for untagged unions.
// NOTE(review): this excerpt is missing interior lines (part of the doc
// comment, the `val: ValueRef` parameter line, the `match *l` header, and
// closing braces) — comments annotate only the visible code.
456 pub fn const_get_field<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>,
458 ix: usize) -> ValueRef {
459 let l = ccx.layout_of(t);
461 layout::CEnum { .. } => bug!("element access in C-like enum const"),
// Struct: translate source-order index to memory order first.
462 layout::Univariant { ref variant, .. } => {
463 const_struct_field(val, variant.memory_index[ix] as usize)
465 layout::Vector { .. } => const_struct_field(val, ix),
// Unions have a single storage member.
466 layout::UntaggedUnion { .. } => const_struct_field(val, 0),
467 _ => bug!("{} does not have fields.", t)
471 /// Extract field of struct-like const, skipping our alignment padding.
472 fn const_struct_field(val: ValueRef, ix: usize) -> ValueRef {
473 // Get the ix-th non-undef element of the struct.
474 let mut real_ix = 0; // actual position in the struct
475 let mut ix = ix; // logical index relative to real_ix
479 field = const_get_elt(val, &[real_ix]);
480 if !is_undef(field) {
483 real_ix = real_ix + 1;
489 real_ix = real_ix + 1;