1 // Copyright 2013 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
12 * # Representation of Algebraic Data Types
14 * This module determines how to represent enums, structs, and tuples
15 * based on their monomorphized types; it is responsible both for
16 * choosing a representation and translating basic operations on
17 * values of those types. (Note: exporting the representations for
18 * debuggers is handled in debuginfo.rs, not here.)
20 * Note that the interface treats everything as a general case of an
21 * enum, so structs/tuples/etc. have one pseudo-variant with
22 * discriminant 0; i.e., as if they were a univariant enum.
24 * Having everything in one place will enable improvements to data
25 * structure representation; possibilities include:
27 * - User-specified alignment (e.g., cacheline-aligning parts of
28 * concurrently accessed data structures); LLVM can't represent this
29 * directly, so we'd have to insert padding fields in any structure
30 * that might contain one and adjust GEP indices accordingly. See
33 * - Store nested enums' discriminants in the same word. Rather, if
 * some variants start with enums, and those enums' representations
35 * have unused alignment padding between discriminant and body, the
36 * outer enum's discriminant can be stored there and those variants
37 * can start at offset 0. Kind of fancy, and might need work to
38 * make copies of the inner enum type cooperate, but it could help
39 * with `Option` or `Result` wrapped around another enum.
41 * - Tagged pointers would be neat, but given that any type can be
42 * used unboxed and any field can have pointers (including mutable)
43 * taken to it, implementing them for Rust seems difficult.
46 #![allow(unsigned_negation)]
48 pub use self::PointerField::*;
49 pub use self::Repr::*;
54 use llvm::{ValueRef, True, IntEQ, IntNE};
57 use middle::subst::Subst;
61 use trans::cleanup::CleanupMethods;
65 use trans::type_::Type;
67 use middle::ty::{mod, Ty};
71 use syntax::attr::IntType;
72 use util::ppaux::ty_to_string;
74 type Hint = attr::ReprAttr;
78 #[deriving(Eq, PartialEq, Show)]
80 /// C-like enums; basically an int.
81 CEnum(IntType, Disr, Disr), // discriminant range (signedness based on the IntType)
83 * Single-case variants, and structs/tuples/records.
85 * Structs with destructors need a dynamic destroyedness flag to
86 * avoid running the destructor too many times; this is included
87 * in the `Struct` if present.
89 Univariant(Struct<'tcx>, bool),
91 * General-case enums: for each case there is a struct, and they
92 * all start with a field for the discriminant.
94 * Types with destructors need a dynamic destroyedness flag to
95 * avoid running the destructor too many times; the last argument
96 * indicates whether such a flag is present.
98 General(IntType, Vec<Struct<'tcx>>, bool),
100 * Two cases distinguished by a nullable pointer: the case with discriminant
101 * `nndiscr` must have single field which is known to be nonnull due to its type.
102 * The other case is known to be zero sized. Hence we represent the enum
103 * as simply a nullable pointer: if not null it indicates the `nndiscr` variant,
104 * otherwise it indicates the other case.
109 nullfields: Vec<Ty<'tcx>>
112 * Two cases distinguished by a nullable pointer: the case with discriminant
113 * `nndiscr` is represented by the struct `nonnull`, where the `ptrfield`th
114 * field is known to be nonnull due to its type; if that field is null, then
115 * it represents the other case, which is inhabited by at most one value
116 * (and all other fields are undefined/unused).
118 * For example, `std::option::Option` instantiated at a safe pointer type
119 * is represented such that `None` is a null pointer and `Some` is the
122 StructWrappedNullablePointer {
123 nonnull: Struct<'tcx>,
125 ptrfield: PointerField,
126 nullfields: Vec<Ty<'tcx>>,
130 /// For structs, and struct-like parts of anything fancier.
131 #[deriving(Eq, PartialEq, Show)]
132 pub struct Struct<'tcx> {
133 // If the struct is DST, then the size and alignment do not take into
134 // account the unsized fields of the struct.
139 pub fields: Vec<Ty<'tcx>>
143 * Convenience for `represent_type`. There should probably be more or
144 * these, for places in trans where the `Ty` isn't directly
147 pub fn represent_node<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
148 node: ast::NodeId) -> Rc<Repr<'tcx>> {
149 represent_type(bcx.ccx(), node_id_type(bcx, node))
152 /// Decides how to represent a given type.
153 pub fn represent_type<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
154 t: Ty<'tcx>) -> Rc<Repr<'tcx>> {
155 debug!("Representing: {}", ty_to_string(cx.tcx(), t));
156 match cx.adt_reprs().borrow().get(&t) {
157 Some(repr) => return repr.clone(),
161 let repr = Rc::new(represent_type_uncached(cx, t));
162 debug!("Represented as: {}", repr)
163 cx.adt_reprs().borrow_mut().insert(t, repr.clone());
167 fn represent_type_uncached<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
168 t: Ty<'tcx>) -> Repr<'tcx> {
170 ty::ty_tup(ref elems) => {
171 Univariant(mk_struct(cx, elems.as_slice(), false, t), false)
173 ty::ty_struct(def_id, ref substs) => {
174 let fields = ty::lookup_struct_fields(cx.tcx(), def_id);
175 let mut ftys = fields.iter().map(|field| {
176 ty::lookup_field_type(cx.tcx(), def_id, field.id, substs)
177 }).collect::<Vec<_>>();
178 let packed = ty::lookup_packed(cx.tcx(), def_id);
179 let dtor = ty::ty_dtor(cx.tcx(), def_id).has_drop_flag();
180 if dtor { ftys.push(ty::mk_bool()); }
182 Univariant(mk_struct(cx, ftys.as_slice(), packed, t), dtor)
184 ty::ty_unboxed_closure(def_id, _, ref substs) => {
185 let upvars = ty::unboxed_closure_upvars(cx.tcx(), def_id, substs);
186 let upvar_types = upvars.iter().map(|u| u.ty).collect::<Vec<_>>();
187 Univariant(mk_struct(cx, upvar_types.as_slice(), false, t), false)
189 ty::ty_enum(def_id, ref substs) => {
190 let cases = get_cases(cx.tcx(), def_id, substs);
191 let hint = *ty::lookup_repr_hints(cx.tcx(), def_id).as_slice().get(0)
192 .unwrap_or(&attr::ReprAny);
194 let dtor = ty::ty_dtor(cx.tcx(), def_id).has_drop_flag();
196 if cases.len() == 0 {
197 // Uninhabitable; represent as unit
198 // (Typechecking will reject discriminant-sizing attrs.)
199 assert_eq!(hint, attr::ReprAny);
200 let ftys = if dtor { vec!(ty::mk_bool()) } else { vec!() };
201 return Univariant(mk_struct(cx, ftys.as_slice(), false, t),
205 if !dtor && cases.iter().all(|c| c.tys.len() == 0) {
206 // All bodies empty -> intlike
207 let discrs: Vec<u64> = cases.iter().map(|c| c.discr).collect();
208 let bounds = IntBounds {
209 ulo: *discrs.iter().min().unwrap(),
210 uhi: *discrs.iter().max().unwrap(),
211 slo: discrs.iter().map(|n| *n as i64).min().unwrap(),
212 shi: discrs.iter().map(|n| *n as i64).max().unwrap()
214 return mk_cenum(cx, hint, &bounds);
217 // Since there's at least one
218 // non-empty body, explicit discriminants should have
219 // been rejected by a checker before this point.
220 if !cases.iter().enumerate().all(|(i,c)| c.discr == (i as Disr)) {
221 cx.sess().bug(format!("non-C-like enum {} with specified \
223 ty::item_path_str(cx.tcx(),
224 def_id)).as_slice());
227 if cases.len() == 1 {
228 // Equivalent to a struct/tuple/newtype.
229 // (Typechecking will reject discriminant-sizing attrs.)
230 assert_eq!(hint, attr::ReprAny);
231 let mut ftys = cases[0].tys.clone();
232 if dtor { ftys.push(ty::mk_bool()); }
233 return Univariant(mk_struct(cx, ftys.as_slice(), false, t),
237 if !dtor && cases.len() == 2 && hint == attr::ReprAny {
238 // Nullable pointer optimization
241 if cases[1 - discr].is_zerolen(cx, t) {
242 let st = mk_struct(cx, cases[discr].tys.as_slice(),
244 match cases[discr].find_ptr(cx) {
245 Some(ThinPointer(_)) if st.fields.len() == 1 => {
246 return RawNullablePointer {
247 nndiscr: discr as Disr,
249 nullfields: cases[1 - discr].tys.clone()
253 return StructWrappedNullablePointer {
254 nndiscr: discr as Disr,
257 nullfields: cases[1 - discr].tys.clone()
268 assert!((cases.len() - 1) as i64 >= 0);
269 let bounds = IntBounds { ulo: 0, uhi: (cases.len() - 1) as u64,
270 slo: 0, shi: (cases.len() - 1) as i64 };
271 let ity = range_to_inttype(cx, hint, &bounds);
273 let fields : Vec<_> = cases.iter().map(|c| {
274 let mut ftys = vec!(ty_of_inttype(ity));
275 ftys.push_all(c.tys.as_slice());
276 if dtor { ftys.push(ty::mk_bool()); }
277 mk_struct(cx, ftys.as_slice(), false, t)
280 ensure_enum_fits_in_address_space(cx, ity, fields.as_slice(), t);
282 General(ity, fields, dtor)
284 _ => cx.sess().bug(format!("adt::represent_type called on non-ADT type: {}",
285 ty_to_string(cx.tcx(), t)).as_slice())
289 // this should probably all be in ty
296 #[deriving(Eq, PartialEq, Show)]
297 pub enum PointerField {
302 impl<'tcx> Case<'tcx> {
303 fn is_zerolen<'a>(&self, cx: &CrateContext<'a, 'tcx>, scapegoat: Ty<'tcx>) -> bool {
304 mk_struct(cx, self.tys.as_slice(), false, scapegoat).size == 0
307 fn find_ptr<'a>(&self, cx: &CrateContext<'a, 'tcx>) -> Option<PointerField> {
308 for (i, &ty) in self.tys.iter().enumerate() {
310 // &T/&mut T/Box<T> could either be a thin or fat pointer depending on T
311 ty::ty_rptr(_, ty::mt { ty, .. }) | ty::ty_uniq(ty) => match ty.sty {
312 // &[T] and &str are a pointer and length pair
313 ty::ty_vec(_, None) | ty::ty_str => return Some(FatPointer(i)),
315 // &Trait is a pair of pointers: the actual object and a vtable
316 ty::ty_trait(..) => return Some(FatPointer(i)),
318 ty::ty_struct(..) if !ty::type_is_sized(cx.tcx(), ty) => {
319 return Some(FatPointer(i))
322 // Any other &T is just a pointer
323 _ => return Some(ThinPointer(i))
326 // Functions are just pointers
327 ty::ty_bare_fn(..) => return Some(ThinPointer(i)),
329 // Closures are a pair of pointers: the code and environment
330 ty::ty_closure(..) => return Some(FatPointer(i)),
332 // Anything else is not a pointer
341 fn get_cases<'tcx>(tcx: &ty::ctxt<'tcx>,
343 substs: &subst::Substs<'tcx>)
345 ty::enum_variants(tcx, def_id).iter().map(|vi| {
346 let arg_tys = vi.args.iter().map(|&raw_ty| {
347 raw_ty.subst(tcx, substs)
349 Case { discr: vi.disr_val, tys: arg_tys }
353 fn mk_struct<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
354 tys: &[Ty<'tcx>], packed: bool,
357 let sized = tys.iter().all(|&ty| ty::type_is_sized(cx.tcx(), ty));
358 let lltys : Vec<Type> = if sized {
360 .map(|&ty| type_of::sizing_type_of(cx, ty)).collect()
362 tys.iter().filter(|&ty| ty::type_is_sized(cx.tcx(), *ty))
363 .map(|&ty| type_of::sizing_type_of(cx, ty)).collect()
366 ensure_struct_fits_in_address_space(cx, lltys.as_slice(), packed, scapegoat);
368 let llty_rec = Type::struct_(cx, lltys.as_slice(), packed);
370 size: machine::llsize_of_alloc(cx, llty_rec),
371 align: machine::llalign_of_min(cx, llty_rec),
374 fields: tys.to_vec(),
386 fn mk_cenum<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
387 hint: Hint, bounds: &IntBounds)
389 let it = range_to_inttype(cx, hint, bounds);
391 attr::SignedInt(_) => CEnum(it, bounds.slo as Disr, bounds.shi as Disr),
392 attr::UnsignedInt(_) => CEnum(it, bounds.ulo, bounds.uhi)
396 fn range_to_inttype(cx: &CrateContext, hint: Hint, bounds: &IntBounds) -> IntType {
397 debug!("range_to_inttype: {} {}", hint, bounds);
398 // Lists of sizes to try. u64 is always allowed as a fallback.
399 #[allow(non_upper_case_globals)]
400 static choose_shortest: &'static[IntType] = &[
401 attr::UnsignedInt(ast::TyU8), attr::SignedInt(ast::TyI8),
402 attr::UnsignedInt(ast::TyU16), attr::SignedInt(ast::TyI16),
403 attr::UnsignedInt(ast::TyU32), attr::SignedInt(ast::TyI32)];
404 #[allow(non_upper_case_globals)]
405 static at_least_32: &'static[IntType] = &[
406 attr::UnsignedInt(ast::TyU32), attr::SignedInt(ast::TyI32)];
410 attr::ReprInt(span, ity) => {
411 if !bounds_usable(cx, ity, bounds) {
412 cx.sess().span_bug(span, "representation hint insufficient for discriminant range")
416 attr::ReprExtern => {
417 attempts = match cx.sess().target.target.arch.as_slice() {
418 // WARNING: the ARM EABI has two variants; the one corresponding to `at_least_32`
419 // appears to be used on Linux and NetBSD, but some systems may use the variant
420 // corresponding to `choose_shortest`. However, we don't run on those yet...?
421 "arm" => at_least_32,
426 attempts = choose_shortest;
428 attr::ReprPacked => {
429 cx.tcx().sess.bug("range_to_inttype: found ReprPacked on an enum");
432 for &ity in attempts.iter() {
433 if bounds_usable(cx, ity, bounds) {
437 return attr::UnsignedInt(ast::TyU64);
440 pub fn ll_inttype(cx: &CrateContext, ity: IntType) -> Type {
442 attr::SignedInt(t) => Type::int_from_ty(cx, t),
443 attr::UnsignedInt(t) => Type::uint_from_ty(cx, t)
447 fn bounds_usable(cx: &CrateContext, ity: IntType, bounds: &IntBounds) -> bool {
448 debug!("bounds_usable: {} {}", ity, bounds);
450 attr::SignedInt(_) => {
451 let lllo = C_integral(ll_inttype(cx, ity), bounds.slo as u64, true);
452 let llhi = C_integral(ll_inttype(cx, ity), bounds.shi as u64, true);
453 bounds.slo == const_to_int(lllo) as i64 && bounds.shi == const_to_int(llhi) as i64
455 attr::UnsignedInt(_) => {
456 let lllo = C_integral(ll_inttype(cx, ity), bounds.ulo, false);
457 let llhi = C_integral(ll_inttype(cx, ity), bounds.uhi, false);
458 bounds.ulo == const_to_uint(lllo) as u64 && bounds.uhi == const_to_uint(llhi) as u64
463 // FIXME(#17596) Ty<'tcx> is incorrectly invariant w.r.t 'tcx.
464 pub fn ty_of_inttype<'tcx>(ity: IntType) -> Ty<'tcx> {
466 attr::SignedInt(t) => ty::mk_mach_int(t),
467 attr::UnsignedInt(t) => ty::mk_mach_uint(t)
471 // LLVM doesn't like types that don't fit in the address space
472 fn ensure_struct_fits_in_address_space<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
475 scapegoat: Ty<'tcx>) {
477 for &llty in fields.iter() {
478 // Invariant: offset < ccx.obj_size_bound() <= 1<<61
480 let type_align = machine::llalign_of_min(ccx, llty);
481 offset = roundup(offset, type_align);
483 // type_align is a power-of-2, so still offset < ccx.obj_size_bound()
484 // llsize_of_alloc(ccx, llty) is also less than ccx.obj_size_bound()
485 // so the sum is less than 1<<62 (and therefore can't overflow).
486 offset += machine::llsize_of_alloc(ccx, llty);
488 if offset >= ccx.obj_size_bound() {
489 ccx.report_overbig_object(scapegoat);
494 fn union_size_and_align(sts: &[Struct]) -> (machine::llsize, machine::llalign) {
495 let size = sts.iter().map(|st| st.size).max().unwrap();
496 let most_aligned = sts.iter().max_by(|st| st.align).unwrap();
497 (size, most_aligned.align)
500 fn ensure_enum_fits_in_address_space<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
503 scapegoat: Ty<'tcx>) {
504 let discr_size = machine::llsize_of_alloc(ccx, ll_inttype(ccx, discr));
505 let (field_size, field_align) = union_size_and_align(fields);
507 // field_align < 1<<32, discr_size <= 8, field_size < OBJ_SIZE_BOUND <= 1<<61
508 // so the sum is less than 1<<62 (and can't overflow).
509 let total_size = roundup(discr_size, field_align) + field_size;
511 if total_size >= ccx.obj_size_bound() {
512 ccx.report_overbig_object(scapegoat);
518 * LLVM-level types are a little complicated.
520 * C-like enums need to be actual ints, not wrapped in a struct,
521 * because that changes the ABI on some platforms (see issue #10308).
523 * For nominal types, in some cases, we need to use LLVM named structs
524 * and fill in the actual contents in a second pass to prevent
525 * unbounded recursion; see also the comments in `trans::type_of`.
527 pub fn type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, r: &Repr<'tcx>) -> Type {
528 generic_type_of(cx, r, None, false, false)
530 // Pass dst=true if the type you are passing is a DST. Yes, we could figure
531 // this out, but if you call this on an unsized type without realising it, you
532 // are going to get the wrong type (it will not include the unsized parts of it).
533 pub fn sizing_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
534 r: &Repr<'tcx>, dst: bool) -> Type {
535 generic_type_of(cx, r, None, true, dst)
537 pub fn incomplete_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
538 r: &Repr<'tcx>, name: &str) -> Type {
539 generic_type_of(cx, r, Some(name), false, false)
541 pub fn finish_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
542 r: &Repr<'tcx>, llty: &mut Type) {
544 CEnum(..) | General(..) | RawNullablePointer { .. } => { }
545 Univariant(ref st, _) | StructWrappedNullablePointer { nonnull: ref st, .. } =>
546 llty.set_struct_body(struct_llfields(cx, st, false, false).as_slice(),
551 fn generic_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
557 CEnum(ity, _, _) => ll_inttype(cx, ity),
558 RawNullablePointer { nnty, .. } => type_of::sizing_type_of(cx, nnty),
559 Univariant(ref st, _) | StructWrappedNullablePointer { nonnull: ref st, .. } => {
562 Type::struct_(cx, struct_llfields(cx, st, sizing, dst).as_slice(),
565 Some(name) => { assert_eq!(sizing, false); Type::named_struct(cx, name) }
568 General(ity, ref sts, _) => {
569 // We need a representation that has:
570 // * The alignment of the most-aligned field
571 // * The size of the largest variant (rounded up to that alignment)
572 // * No alignment padding anywhere any variant has actual data
573 // (currently matters only for enums small enough to be immediate)
574 // * The discriminant in an obvious place.
576 // So we start with the discriminant, pad it up to the alignment with
577 // more of its own type, then use alignment-sized ints to get the rest
580 // FIXME #10604: this breaks when vector types are present.
581 let (size, align) = union_size_and_align(sts.as_slice());
582 let align_s = align as u64;
583 let discr_ty = ll_inttype(cx, ity);
584 let discr_size = machine::llsize_of_alloc(cx, discr_ty);
585 let align_units = (size + align_s - 1) / align_s - 1;
586 let pad_ty = match align_s {
587 1 => Type::array(&Type::i8(cx), align_units),
588 2 => Type::array(&Type::i16(cx), align_units),
589 4 => Type::array(&Type::i32(cx), align_units),
590 8 if machine::llalign_of_min(cx, Type::i64(cx)) == 8 =>
591 Type::array(&Type::i64(cx), align_units),
592 a if a.count_ones() == 1 => Type::array(&Type::vector(&Type::i32(cx), a / 4),
594 _ => panic!("unsupported enum alignment: {}", align)
596 assert_eq!(machine::llalign_of_min(cx, pad_ty), align);
597 assert_eq!(align_s % discr_size, 0);
598 let fields = vec!(discr_ty,
599 Type::array(&discr_ty, align_s / discr_size - 1),
602 None => Type::struct_(cx, fields.as_slice(), false),
604 let mut llty = Type::named_struct(cx, name);
605 llty.set_struct_body(fields.as_slice(), false);
613 fn struct_llfields<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, st: &Struct<'tcx>,
614 sizing: bool, dst: bool) -> Vec<Type> {
616 st.fields.iter().filter(|&ty| !dst || ty::type_is_sized(cx.tcx(), *ty))
617 .map(|&ty| type_of::sizing_type_of(cx, ty)).collect()
619 st.fields.iter().map(|&ty| type_of::type_of(cx, ty)).collect()
624 * Obtain a representation of the discriminant sufficient to translate
625 * destructuring; this may or may not involve the actual discriminant.
627 * This should ideally be less tightly tied to `_match`.
629 pub fn trans_switch<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
630 r: &Repr<'tcx>, scrutinee: ValueRef)
631 -> (_match::BranchKind, Option<ValueRef>) {
633 CEnum(..) | General(..) |
634 RawNullablePointer { .. } | StructWrappedNullablePointer { .. } => {
635 (_match::Switch, Some(trans_get_discr(bcx, r, scrutinee, None)))
638 (_match::Single, None)
645 /// Obtain the actual discriminant of a value.
646 pub fn trans_get_discr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, r: &Repr<'tcx>,
647 scrutinee: ValueRef, cast_to: Option<Type>)
651 debug!("trans_get_discr r: {}", r);
653 CEnum(ity, min, max) => {
654 val = load_discr(bcx, ity, scrutinee, min, max);
655 signed = ity.is_signed();
657 General(ity, ref cases, _) => {
658 let ptr = GEPi(bcx, scrutinee, &[0, 0]);
659 val = load_discr(bcx, ity, ptr, 0, (cases.len() - 1) as Disr);
660 signed = ity.is_signed();
663 val = C_u8(bcx.ccx(), 0);
666 RawNullablePointer { nndiscr, nnty, .. } => {
667 let cmp = if nndiscr == 0 { IntEQ } else { IntNE };
668 let llptrty = type_of::sizing_type_of(bcx.ccx(), nnty);
669 val = ICmp(bcx, cmp, Load(bcx, scrutinee), C_null(llptrty));
672 StructWrappedNullablePointer { nndiscr, ptrfield, .. } => {
673 val = struct_wrapped_nullable_bitdiscr(bcx, nndiscr, ptrfield, scrutinee);
679 Some(llty) => if signed { SExt(bcx, val, llty) } else { ZExt(bcx, val, llty) }
683 fn struct_wrapped_nullable_bitdiscr(bcx: Block, nndiscr: Disr, ptrfield: PointerField,
684 scrutinee: ValueRef) -> ValueRef {
685 let llptrptr = match ptrfield {
686 ThinPointer(field) => GEPi(bcx, scrutinee, &[0, field]),
687 FatPointer(field) => GEPi(bcx, scrutinee, &[0, field, abi::FAT_PTR_ADDR])
689 let llptr = Load(bcx, llptrptr);
690 let cmp = if nndiscr == 0 { IntEQ } else { IntNE };
691 ICmp(bcx, cmp, llptr, C_null(val_ty(llptr)))
694 /// Helper for cases where the discriminant is simply loaded.
695 fn load_discr(bcx: Block, ity: IntType, ptr: ValueRef, min: Disr, max: Disr)
697 let llty = ll_inttype(bcx.ccx(), ity);
698 assert_eq!(val_ty(ptr), llty.ptr_to());
699 let bits = machine::llbitsize_of_real(bcx.ccx(), llty);
701 let bits = bits as uint;
702 let mask = (-1u64 >> (64 - bits)) as Disr;
703 if (max + 1) & mask == min & mask {
704 // i.e., if the range is everything. The lo==hi case would be
705 // rejected by the LLVM verifier (it would mean either an
706 // empty set, which is impossible, or the entire range of the
707 // type, which is pointless).
710 // llvm::ConstantRange can deal with ranges that wrap around,
711 // so an overflow on (max + 1) is fine.
712 LoadRangeAssert(bcx, ptr, min, (max+1), /* signed: */ True)
717 * Yield information about how to dispatch a case of the
718 * discriminant-like value returned by `trans_switch`.
720 * This should ideally be less tightly tied to `_match`.
722 pub fn trans_case<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, r: &Repr, discr: Disr)
723 -> _match::OptResult<'blk, 'tcx> {
725 CEnum(ity, _, _) => {
726 _match::SingleResult(Result::new(bcx, C_integral(ll_inttype(bcx.ccx(), ity),
727 discr as u64, true)))
729 General(ity, _, _) => {
730 _match::SingleResult(Result::new(bcx, C_integral(ll_inttype(bcx.ccx(), ity),
731 discr as u64, true)))
734 bcx.ccx().sess().bug("no cases for univariants or structs")
736 RawNullablePointer { .. } |
737 StructWrappedNullablePointer { .. } => {
738 assert!(discr == 0 || discr == 1);
739 _match::SingleResult(Result::new(bcx, C_bool(bcx.ccx(), discr != 0)))
745 * Set the discriminant for a new value of the given case of the given
748 pub fn trans_set_discr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, r: &Repr<'tcx>,
749 val: ValueRef, discr: Disr) {
751 CEnum(ity, min, max) => {
752 assert_discr_in_range(ity, min, max, discr);
753 Store(bcx, C_integral(ll_inttype(bcx.ccx(), ity), discr as u64, true),
756 General(ity, ref cases, dtor) => {
758 let ptr = trans_field_ptr(bcx, r, val, discr,
759 cases[discr as uint].fields.len() - 2);
760 Store(bcx, C_u8(bcx.ccx(), 1), ptr);
762 Store(bcx, C_integral(ll_inttype(bcx.ccx(), ity), discr as u64, true),
763 GEPi(bcx, val, &[0, 0]))
765 Univariant(ref st, dtor) => {
766 assert_eq!(discr, 0);
768 Store(bcx, C_u8(bcx.ccx(), 1),
769 GEPi(bcx, val, &[0, st.fields.len() - 1]));
772 RawNullablePointer { nndiscr, nnty, ..} => {
773 if discr != nndiscr {
774 let llptrty = type_of::sizing_type_of(bcx.ccx(), nnty);
775 Store(bcx, C_null(llptrty), val)
778 StructWrappedNullablePointer { ref nonnull, nndiscr, ptrfield, .. } => {
779 if discr != nndiscr {
780 let (llptrptr, llptrty) = match ptrfield {
781 ThinPointer(field) =>
782 (GEPi(bcx, val, &[0, field]),
783 type_of::type_of(bcx.ccx(), nonnull.fields[field])),
784 FatPointer(field) => {
785 let v = GEPi(bcx, val, &[0, field, abi::FAT_PTR_ADDR]);
786 (v, val_ty(v).element_type())
789 Store(bcx, C_null(llptrty), llptrptr)
795 fn assert_discr_in_range(ity: IntType, min: Disr, max: Disr, discr: Disr) {
797 attr::UnsignedInt(_) => assert!(min <= discr && discr <= max),
798 attr::SignedInt(_) => assert!(min as i64 <= discr as i64 && discr as i64 <= max as i64)
803 * The number of fields in a given case; for use when obtaining this
804 * information from the type or definition is less convenient.
806 pub fn num_args(r: &Repr, discr: Disr) -> uint {
809 Univariant(ref st, dtor) => {
810 assert_eq!(discr, 0);
811 st.fields.len() - (if dtor { 1 } else { 0 })
813 General(_, ref cases, dtor) => {
814 cases[discr as uint].fields.len() - 1 - (if dtor { 1 } else { 0 })
816 RawNullablePointer { nndiscr, ref nullfields, .. } => {
817 if discr == nndiscr { 1 } else { nullfields.len() }
819 StructWrappedNullablePointer { ref nonnull, nndiscr,
820 ref nullfields, .. } => {
821 if discr == nndiscr { nonnull.fields.len() } else { nullfields.len() }
826 /// Access a field, at a point when the value's case is known.
827 pub fn trans_field_ptr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, r: &Repr<'tcx>,
828 val: ValueRef, discr: Disr, ix: uint) -> ValueRef {
829 // Note: if this ever needs to generate conditionals (e.g., if we
830 // decide to do some kind of cdr-coding-like non-unique repr
831 // someday), it will need to return a possibly-new bcx as well.
834 bcx.ccx().sess().bug("element access in C-like enum")
836 Univariant(ref st, _dtor) => {
837 assert_eq!(discr, 0);
838 struct_field_ptr(bcx, st, val, ix, false)
840 General(_, ref cases, _) => {
841 struct_field_ptr(bcx, &cases[discr as uint], val, ix + 1, true)
843 RawNullablePointer { nndiscr, ref nullfields, .. } |
844 StructWrappedNullablePointer { nndiscr, ref nullfields, .. } if discr != nndiscr => {
845 // The unit-like case might have a nonzero number of unit-like fields.
846 // (e.d., Result of Either with (), as one side.)
847 let ty = type_of::type_of(bcx.ccx(), nullfields[ix]);
848 assert_eq!(machine::llsize_of_alloc(bcx.ccx(), ty), 0);
849 // The contents of memory at this pointer can't matter, but use
850 // the value that's "reasonable" in case of pointer comparison.
851 PointerCast(bcx, val, ty.ptr_to())
853 RawNullablePointer { nndiscr, nnty, .. } => {
855 assert_eq!(discr, nndiscr);
856 let ty = type_of::type_of(bcx.ccx(), nnty);
857 PointerCast(bcx, val, ty.ptr_to())
859 StructWrappedNullablePointer { ref nonnull, nndiscr, .. } => {
860 assert_eq!(discr, nndiscr);
861 struct_field_ptr(bcx, nonnull, val, ix, false)
866 pub fn struct_field_ptr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, st: &Struct<'tcx>, val: ValueRef,
867 ix: uint, needs_cast: bool) -> ValueRef {
868 let val = if needs_cast {
870 let fields = st.fields.iter().map(|&ty| type_of::type_of(ccx, ty)).collect::<Vec<_>>();
871 let real_ty = Type::struct_(ccx, fields.as_slice(), st.packed);
872 PointerCast(bcx, val, real_ty.ptr_to())
877 GEPi(bcx, val, &[0, ix])
880 pub fn fold_variants<'blk, 'tcx>(
881 bcx: Block<'blk, 'tcx>, r: &Repr<'tcx>, value: ValueRef,
882 f: |Block<'blk, 'tcx>, &Struct<'tcx>, ValueRef| -> Block<'blk, 'tcx>)
883 -> Block<'blk, 'tcx> {
886 Univariant(ref st, _) => {
889 General(ity, ref cases, _) => {
891 let unr_cx = fcx.new_temp_block("enum-variant-iter-unr");
894 let discr_val = trans_get_discr(bcx, r, value, None);
895 let llswitch = Switch(bcx, discr_val, unr_cx.llbb, cases.len());
896 let bcx_next = fcx.new_temp_block("enum-variant-iter-next");
898 for (discr, case) in cases.iter().enumerate() {
899 let mut variant_cx = fcx.new_temp_block(
900 format!("enum-variant-iter-{}", discr.to_string()).as_slice()
902 let rhs_val = C_integral(ll_inttype(ccx, ity), discr as u64, true);
903 AddCase(llswitch, rhs_val, variant_cx.llbb);
905 let fields = case.fields.iter().map(|&ty|
906 type_of::type_of(bcx.ccx(), ty)).collect::<Vec<_>>();
907 let real_ty = Type::struct_(ccx, fields.as_slice(), case.packed);
908 let variant_value = PointerCast(variant_cx, value, real_ty.ptr_to());
910 variant_cx = f(variant_cx, case, variant_value);
911 Br(variant_cx, bcx_next.llbb);
920 /// Access the struct drop flag, if present.
921 pub fn trans_drop_flag_ptr<'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>, r: &Repr<'tcx>, val: ValueRef)
922 -> datum::DatumBlock<'blk, 'tcx, datum::Expr> {
923 let ptr_ty = ty::mk_imm_ptr(bcx.tcx(), ty::mk_bool());
925 Univariant(ref st, true) => {
926 let flag_ptr = GEPi(bcx, val, &[0, st.fields.len() - 1]);
927 datum::immediate_rvalue_bcx(bcx, flag_ptr, ptr_ty).to_expr_datumblock()
929 General(_, _, true) => {
931 let custom_cleanup_scope = fcx.push_custom_cleanup_scope();
932 let scratch = unpack_datum!(bcx, datum::lvalue_scratch_datum(
933 bcx, ty::mk_bool(), "drop_flag", false,
934 cleanup::CustomScope(custom_cleanup_scope), (), |_, bcx, _| bcx
936 bcx = fold_variants(bcx, r, val, |variant_cx, st, value| {
937 let ptr = struct_field_ptr(variant_cx, st, value, (st.fields.len() - 1), false);
938 datum::Datum::new(ptr, ptr_ty, datum::Rvalue::new(datum::ByRef))
939 .store_to(variant_cx, scratch.val)
941 let expr_datum = scratch.to_expr_datum();
942 fcx.pop_custom_cleanup_scope(custom_cleanup_scope);
943 datum::DatumBlock::new(bcx, expr_datum)
945 _ => bcx.ccx().sess().bug("tried to get drop flag of non-droppable type")
950 * Construct a constant value, suitable for initializing a
951 * GlobalVariable, given a case and constant values for its fields.
952 * Note that this may have a different LLVM type (and different
953 * alignment!) from the representation's `type_of`, so it needs a
954 * pointer cast before use.
956 * The LLVM type system does not directly support unions, and only
957 * pointers can be bitcast, so a constant (and, by extension, the
958 * GlobalVariable initialized by it) will have a type that can vary
959 * depending on which case of an enum it is.
961 * To understand the alignment situation, consider `enum E { V64(u64),
962 * V32(u32, u32) }` on Windows. The type has 8-byte alignment to
963 * accommodate the u64, but `V32(x, y)` would have LLVM type `{i32,
964 * i32, i32}`, which is 4-byte aligned.
966 * Currently the returned value has the same size as the type, but
967 * this could be changed in the future to avoid allocating unnecessary
968 * space after values of shorter-than-maximum cases.
970 pub fn trans_const<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, r: &Repr<'tcx>, discr: Disr,
971 vals: &[ValueRef]) -> ValueRef {
973 CEnum(ity, min, max) => {
974 assert_eq!(vals.len(), 0);
975 assert_discr_in_range(ity, min, max, discr);
976 C_integral(ll_inttype(ccx, ity), discr as u64, true)
978 General(ity, ref cases, _) => {
979 let case = &cases[discr as uint];
980 let max_sz = cases.iter().map(|x| x.size).max().unwrap();
981 let lldiscr = C_integral(ll_inttype(ccx, ity), discr as u64, true);
982 let mut f = vec![lldiscr];
984 let mut contents = build_const_struct(ccx, case, f.as_slice());
985 contents.push_all(&[padding(ccx, max_sz - case.size)]);
986 C_struct(ccx, contents.as_slice(), false)
988 Univariant(ref st, _dro) => {
990 let contents = build_const_struct(ccx, st, vals);
991 C_struct(ccx, contents.as_slice(), st.packed)
993 RawNullablePointer { nndiscr, nnty, .. } => {
994 if discr == nndiscr {
995 assert_eq!(vals.len(), 1);
998 C_null(type_of::sizing_type_of(ccx, nnty))
1001 StructWrappedNullablePointer { ref nonnull, nndiscr, .. } => {
1002 if discr == nndiscr {
1003 C_struct(ccx, build_const_struct(ccx,
1008 let vals = nonnull.fields.iter().map(|&ty| {
1009 // Always use null even if it's not the `ptrfield`th
1010 // field; see #8506.
1011 C_null(type_of::sizing_type_of(ccx, ty))
1012 }).collect::<Vec<ValueRef>>();
1013 C_struct(ccx, build_const_struct(ccx,
1015 vals.as_slice()).as_slice(),
1023 * Compute struct field offsets relative to struct begin.
// NOTE(review): fragmentary extraction -- the initialization of `offset`
// (presumably `let mut offset = 0;`) and the final return of `offsets`
// are not visible in this chunk.
1025 fn compute_struct_field_offsets<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
1026                                           st: &Struct<'tcx>) -> Vec<u64> {
1027    let mut offsets = vec!();
1030    for &ty in st.fields.iter() {
1031        let llty = type_of::sizing_type_of(ccx, ty);
        // Align the running offset up to this field's required alignment,
        // record it, then advance past the field's allocated size.
1033        let type_align = type_of::align_of(ccx, ty);
1034        offset = roundup(offset, type_align);
1036        offsets.push(offset);
1037        offset += machine::llsize_of_alloc(ccx, llty);
        // Sanity check: exactly one offset was produced per field.
1039    assert_eq!(st.fields.len(), offsets.len());
1044 * Building structs is a little complicated, because we might need to
1045 * insert padding if a field's value is less aligned than its type.
1047 * Continuing the example from `trans_const`, a value of type `(u32,
1048 * E)` should have the `E` at offset 8, but if that field's
1049 * initializer is 4-byte aligned then simply translating the tuple as
1050 * a two-element struct will locate it at offset 4, and accesses to it
1051 * will read the wrong memory.
// NOTE(review): fragmentary extraction -- the return-type line, the
// `offset` initialization, the push of `val` itself into `cfields`, and
// the final return of `cfields` are not visible in this chunk.
1053 fn build_const_struct<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
1054                                 st: &Struct<'tcx>, vals: &[ValueRef])
1056    assert_eq!(vals.len(), st.fields.len());
        // Offsets each field must land on, per the type's computed layout.
1058    let target_offsets = compute_struct_field_offsets(ccx, st);
1060    // offset of current value
1062    let mut cfields = Vec::new();
1063    for (&val, &target_offset) in vals.iter().zip(target_offsets.iter()) {
        // Where this value would naturally fall given its own alignment...
1065        let val_align = machine::llalign_of_min(ccx, val_ty(val));
1066        offset = roundup(offset, val_align);
        // ...and if that falls short of the type's required offset, insert
        // explicit undef-byte padding to push the value into place.
1068        if offset != target_offset {
1069            cfields.push(padding(ccx, target_offset - offset));
1070            offset = target_offset;
        // Real field values must never be undef: undef is reserved to mark
        // padding (see `const_struct_field`, which skips undef elements).
1072        assert!(!is_undef(val));
1074        offset += machine::llsize_of_alloc(ccx, val_ty(val));
        // Pad the tail out to the struct's full size, if short.
1077    assert!(st.sized && offset <= st.size);
1078    if offset != st.size {
1079        cfields.push(padding(ccx, st.size - offset));
1085 fn padding(ccx: &CrateContext, size: u64) -> ValueRef {
1086 C_undef(Type::array(&Type::i8(ccx), size))
// FIXME this utility routine should be somewhere more general
/// Round `x` up to the next multiple of the alignment `a`.
///
/// Returns `x` unchanged when it is already a multiple of `a`.
/// Precondition: `a > 0` (it is an alignment); `a == 0` would divide by
/// zero. Callers pass type alignments, which are always at least 1.
fn roundup(x: u64, a: u32) -> u64 {
    let a = a as u64;
    ((x + (a - 1)) / a) * a
}
1093 /// Get the discriminant of a constant value. (Not currently used.)
// NOTE(review): fragmentary extraction -- the `match *r` opener, the inner
// `match ity` openers, the else-branches of the nullable-pointer arms, and
// the closing braces are not visible in this chunk.
1094 pub fn const_get_discrim(ccx: &CrateContext, r: &Repr, val: ValueRef)
        // C-like enum: the constant IS the discriminant; read it back with
        // the signedness recorded in `ity`.
1097    CEnum(ity, _, _) => {
1099        attr::SignedInt(..) => const_to_int(val) as Disr,
1100        attr::UnsignedInt(..) => const_to_uint(val) as Disr
        // General representation: the discriminant is struct element 0.
1103    General(ity, _, _) => {
1105        attr::SignedInt(..) => const_to_int(const_get_elt(ccx, val, &[0])) as Disr,
1106        attr::UnsignedInt(..) => const_to_uint(const_get_elt(ccx, val, &[0])) as Disr
        // Structs/tuples act as a univariant enum with discriminant 0.
1109    Univariant(..) => 0,
        // Null-pointer optimization, raw form: the branch visible here
        // yields the variant that is NOT `nndiscr`, i.e. the null case.
1110    RawNullablePointer { nndiscr, .. } => {
1112        /* subtraction as uint is ok because nndiscr is either 0 or 1 */
1113        (1 - nndiscr) as Disr
        // Struct-wrapped form: inspect the discriminating pointer field --
        // for a fat pointer, its address word (FAT_PTR_ADDR) -- for nullness.
1118    StructWrappedNullablePointer { nndiscr, ptrfield, .. } => {
1119        let (idx, sub_idx) = match ptrfield {
1120            ThinPointer(field) => (field, None),
1121            FatPointer(field) => (field, Some(abi::FAT_PTR_ADDR))
1123        if is_null(const_struct_field(ccx, val, idx, sub_idx)) {
1124            /* subtraction as uint is ok because nndiscr is either 0 or 1 */
1125            (1 - nndiscr) as Disr
1134 * Extract a field of a constant value, as appropriate for its
1137 * (Not to be confused with `common::const_get_elt`, which operates on
1138 * raw LLVM-level structs and arrays.)
// NOTE(review): fragmentary extraction -- the `match *r` opener, the body
// of the `RawNullablePointer` arm, and closing braces are not visible.
1140 pub fn const_get_field(ccx: &CrateContext, r: &Repr, val: ValueRef,
1141                       _discr: Disr, ix: uint) -> ValueRef {
        // A C-like enum has no fields; asking for one is a compiler bug.
1143    CEnum(..) => ccx.sess().bug("element access in C-like enum const"),
1144    Univariant(..) => const_struct_field(ccx, val, ix, None),
        // `ix + 1`: skip past the discriminant stored at element 0.
1145    General(..) => const_struct_field(ccx, val, ix + 1, None),
1146    RawNullablePointer { .. } => {
1150    StructWrappedNullablePointer{ .. } => const_struct_field(ccx, val, ix, None)
1154 /// Extract field of struct-like const, skipping our alignment padding.
// NOTE(review): fragmentary extraction -- the return-type line, the
// enclosing loop header, the early exit once the logical index is
// consumed, and the trailing lines are not visible here. What remains is
// the scan that walks physical struct elements, counting only non-undef
// (i.e. non-padding) ones toward the logical field index `ix`.
1155 fn const_struct_field(ccx: &CrateContext, val: ValueRef, ix: uint, sub_idx: Option<uint>)
1157    // Get the ix-th non-undef element of the struct.
1158    let mut real_ix = 0; // actual position in the struct
1159    let mut ix = ix; // logical index relative to real_ix
        // Read the next physical element, optionally descending into a fat
        // pointer's sub-element when `sub_idx` is given.
1163        field = match sub_idx {
1164            Some(si) => const_get_elt(ccx, val, &[real_ix, si as u32]),
1165            None => const_get_elt(ccx, val, &[real_ix])
        // Undef elements are alignment padding inserted by
        // `build_const_struct`; they advance the physical index without
        // consuming the logical one.
1167        if !is_undef(field) {
1170            real_ix = real_ix + 1;
1176            real_ix = real_ix + 1;