use llvm::{ValueRef, True, IntEQ, IntNE};
use rustc::ty::layout;
use rustc::ty::{self, Ty, AdtKind};
-use rustc::mir::tcx::LvalueTy;
-use mir::lvalue::LvalueRef;
use common::*;
use builder::Builder;
-use glue;
use base;
use machine;
use monomorphize;
use type_::Type;
use type_of;
-use value::Value;
#[derive(Copy, Clone, PartialEq)]
pub enum BranchKind {
}
}
-/// Access a field, at a point when the value's case is known.
-pub fn trans_field_ptr<'a, 'tcx>(
- bcx: &Builder<'a, 'tcx>,
- val: LvalueRef<'tcx>,
- ix: usize,
-) -> ValueRef {
- let discr = match val.ty {
- LvalueTy::Ty { .. } => 0,
- LvalueTy::Downcast { variant_index, .. } => variant_index,
- };
- let t = val.ty.to_ty(bcx.tcx());
- let l = bcx.ccx.layout_of(t);
- // Note: if this ever needs to generate conditionals (e.g., if we
- // decide to do some kind of cdr-coding-like non-unique repr
- // someday), it will need to return a possibly-new bcx as well.
- match *l {
- layout::Univariant { ref variant, .. } => {
- assert_eq!(discr, 0);
- struct_field_ptr(bcx, &variant,
- &compute_fields(bcx.ccx, t, 0, false),
- val, ix, false)
- }
- layout::Vector { count, .. } => {
- assert_eq!(discr, 0);
- assert!((ix as u64) < count);
- bcx.struct_gep(val.llval, ix)
- }
- layout::General { discr: d, ref variants, .. } => {
- let mut fields = compute_fields(bcx.ccx, t, discr, false);
- fields.insert(0, d.to_ty(&bcx.tcx(), false));
- struct_field_ptr(bcx, &variants[discr],
- &fields,
- val, ix + 1, true)
- }
- layout::UntaggedUnion { .. } => {
- let fields = compute_fields(bcx.ccx, t, 0, false);
- let ty = type_of::in_memory_type_of(bcx.ccx, fields[ix]);
- bcx.pointercast(val.llval, ty.ptr_to())
- }
- layout::RawNullablePointer { nndiscr, .. } |
- layout::StructWrappedNullablePointer { nndiscr, .. } if discr as u64 != nndiscr => {
- let nullfields = compute_fields(bcx.ccx, t, (1-nndiscr) as usize, false);
- // The unit-like case might have a nonzero number of unit-like fields.
- // (e.d., Result of Either with (), as one side.)
- let ty = type_of::type_of(bcx.ccx, nullfields[ix]);
- assert_eq!(machine::llsize_of_alloc(bcx.ccx, ty), 0);
- bcx.pointercast(val.llval, ty.ptr_to())
- }
- layout::RawNullablePointer { nndiscr, .. } => {
- let nnty = compute_fields(bcx.ccx, t, nndiscr as usize, false)[0];
- assert_eq!(ix, 0);
- assert_eq!(discr as u64, nndiscr);
- let ty = type_of::type_of(bcx.ccx, nnty);
- bcx.pointercast(val.llval, ty.ptr_to())
- }
- layout::StructWrappedNullablePointer { ref nonnull, nndiscr, .. } => {
- assert_eq!(discr as u64, nndiscr);
- struct_field_ptr(bcx, &nonnull,
- &compute_fields(bcx.ccx, t, discr, false),
- val, ix, false)
- }
- _ => bug!("element access in type without elements: {} represented as {:#?}", t, l)
- }
-}
-
-fn struct_field_ptr<'a, 'tcx>(
- bcx: &Builder<'a, 'tcx>,
- st: &layout::Struct,
- fields: &Vec<Ty<'tcx>>,
- val: LvalueRef,
- ix: usize,
- needs_cast: bool
-) -> ValueRef {
- let fty = fields[ix];
- let ccx = bcx.ccx;
-
- let ptr_val = if needs_cast {
- let fields = st.field_index_by_increasing_offset().map(|i| {
- type_of::in_memory_type_of(ccx, fields[i])
- }).collect::<Vec<_>>();
- let real_ty = Type::struct_(ccx, &fields[..], st.packed);
- bcx.pointercast(val.llval, real_ty.ptr_to())
- } else {
- val.llval
- };
-
- // Simple case - we can just GEP the field
- // * First field - Always aligned properly
- // * Packed struct - There is no alignment padding
- // * Field is sized - pointer is properly aligned already
- if st.offsets[ix] == layout::Size::from_bytes(0) || st.packed ||
- bcx.ccx.shared().type_is_sized(fty) {
- return bcx.struct_gep(ptr_val, st.memory_index[ix] as usize);
- }
-
- // If the type of the last field is [T] or str, then we don't need to do
- // any adjusments
- match fty.sty {
- ty::TySlice(..) | ty::TyStr => {
- return bcx.struct_gep(ptr_val, st.memory_index[ix] as usize);
- }
- _ => ()
- }
-
- // There's no metadata available, log the case and just do the GEP.
- if !val.has_extra() {
- debug!("Unsized field `{}`, of `{:?}` has no metadata for adjustment",
- ix, Value(ptr_val));
- return bcx.struct_gep(ptr_val, ix);
- }
-
- // We need to get the pointer manually now.
- // We do this by casting to a *i8, then offsetting it by the appropriate amount.
- // We do this instead of, say, simply adjusting the pointer from the result of a GEP
- // because the field may have an arbitrary alignment in the LLVM representation
- // anyway.
- //
- // To demonstrate:
- // struct Foo<T: ?Sized> {
- // x: u16,
- // y: T
- // }
- //
- // The type Foo<Foo<Trait>> is represented in LLVM as { u16, { u16, u8 }}, meaning that
- // the `y` field has 16-bit alignment.
-
- let meta = val.llextra;
-
-
- let offset = st.offsets[ix].bytes();
- let unaligned_offset = C_uint(bcx.ccx, offset);
-
- // Get the alignment of the field
- let (_, align) = glue::size_and_align_of_dst(bcx, fty, meta);
-
- // Bump the unaligned offset up to the appropriate alignment using the
- // following expression:
- //
- // (unaligned offset + (align - 1)) & -align
-
- // Calculate offset
- let align_sub_1 = bcx.sub(align, C_uint(bcx.ccx, 1u64));
- let offset = bcx.and(bcx.add(unaligned_offset, align_sub_1),
- bcx.neg(align));
-
- debug!("struct_field_ptr: DST field offset: {:?}", Value(offset));
-
- // Cast and adjust pointer
- let byte_ptr = bcx.pointercast(ptr_val, Type::i8p(bcx.ccx));
- let byte_ptr = bcx.gep(byte_ptr, &[offset]);
-
- // Finally, cast back to the type expected
- let ll_fty = type_of::in_memory_type_of(bcx.ccx, fty);
- debug!("struct_field_ptr: Field type is {:?}", ll_fty);
- bcx.pointercast(byte_ptr, ll_fty.ptr_to())
-}
-
// FIXME this utility routine should be somewhere more general
#[inline]
fn roundup(x: u64, a: u32) -> u64 { let a = a as u64; ((x + (a - 1)) / a) * a }
continue;
}
- let src_f = adt::trans_field_ptr(bcx, src, i);
- let dst_f = adt::trans_field_ptr(bcx, dst, i);
+ let src_f = src.trans_field_ptr(bcx, i);
+ let dst_f = dst.trans_field_ptr(bcx, i);
if src_fty == dst_fty {
memcpy_ty(bcx, dst_f, src_f, src_fty, None);
} else {
let mut llarg_idx = fn_ty.ret.is_indirect() as usize;
let mut arg_idx = 0;
for (i, arg_ty) in sig.inputs().iter().enumerate() {
- let lldestptr = adt::trans_field_ptr(&bcx, dest_val, i);
+ let lldestptr = dest_val.trans_field_ptr(&bcx, i);
let arg = &fn_ty.args[arg_idx];
arg_idx += 1;
if common::type_is_fat_ptr(bcx.ccx, arg_ty) {
substs: substs,
variant_index: variant_index,
};
- let field_ptr = adt::trans_field_ptr(&cx, av, i);
+ let field_ptr = av.trans_field_ptr(&cx, i);
drop_ty(&cx, LvalueRef::new_sized_ty(field_ptr, arg));
}
}
match t.sty {
ty::TyClosure(def_id, substs) => {
for (i, upvar_ty) in substs.upvar_tys(def_id, cx.tcx()).enumerate() {
- let llupvar = adt::trans_field_ptr(&cx, ptr, i);
+ let llupvar = ptr.trans_field_ptr(&cx, i);
drop_ty(&cx, LvalueRef::new_sized_ty(llupvar, upvar_ty));
}
}
}
ty::TyTuple(ref args) => {
for (i, arg) in args.iter().enumerate() {
- let llfld_a = adt::trans_field_ptr(&cx, ptr, i);
+ let llfld_a = ptr.trans_field_ptr(&cx, i);
drop_ty(&cx, LvalueRef::new_sized_ty(llfld_a, *arg));
}
}
substs: substs,
variant_index: Disr::from(discr).0 as usize,
};
- let llfld_a = adt::trans_field_ptr(&cx, ptr, i);
+ let llfld_a = ptr.trans_field_ptr(&cx, i);
let ptr = if cx.ccx.shared().type_is_sized(field_ty) {
LvalueRef::new_sized_ty(llfld_a, field_ty)
} else {
// etc.
assert!(!bcx.ccx.shared().type_needs_drop(arg_type));
let arg = LvalueRef::new_sized_ty(llarg, arg_type);
- (0..contents.len())
- .map(|i| {
- bcx.load(adt::trans_field_ptr(bcx, arg, i))
- })
- .collect()
+ (0..contents.len()).map(|i| bcx.load(arg.trans_field_ptr(bcx, i))).collect()
}
intrinsics::Type::Pointer(_, Some(ref llvm_elem), _) => {
let llvm_elem = one(ty_to_type(bcx.ccx, llvm_elem, &mut false));
match tuple.val {
Ref(llval) => {
for (n, &ty) in arg_types.iter().enumerate() {
- let ptr = adt::trans_field_ptr(
- bcx, LvalueRef::new_sized_ty(llval, tuple.ty), n
- );
+ let ptr = LvalueRef::new_sized_ty(llval, tuple.ty);
+ let ptr = ptr.trans_field_ptr(bcx, n);
let val = if common::type_is_fat_ptr(bcx.ccx, ty) {
let (lldata, llextra) = base::load_fat_ptr(bcx, ptr, ty);
Pair(lldata, llextra)
// except according to those terms.
use llvm::ValueRef;
-use rustc::ty::{self, Ty, TypeFoldable};
+use rustc::ty::{self, layout, Ty, TypeFoldable};
use rustc::mir;
use rustc::mir::tcx::LvalueTy;
use rustc_data_structures::indexed_vec::Idx;
use machine;
use type_of::type_of;
use type_of;
+use type_::Type;
+use value::Value;
+use glue;
use std::ptr;
pub ty: LvalueTy<'tcx>,
}
-impl<'tcx> LvalueRef<'tcx> {
+impl<'a, 'tcx> LvalueRef<'tcx> {
pub fn new_sized(llval: ValueRef, lvalue_ty: LvalueTy<'tcx>) -> LvalueRef<'tcx> {
LvalueRef { llval: llval, llextra: ptr::null_mut(), ty: lvalue_ty }
}
}
}
- pub fn len<'a>(&self, ccx: &CrateContext<'a, 'tcx>) -> ValueRef {
+ pub fn len(&self, ccx: &CrateContext<'a, 'tcx>) -> ValueRef {
let ty = self.ty.to_ty(ccx.tcx());
match ty.sty {
ty::TyArray(_, n) => common::C_uint(ccx, n),
pub fn has_extra(&self) -> bool {
!self.llextra.is_null()
}
+
+ pub fn struct_field_ptr(
+ self,
+ bcx: &Builder<'a, 'tcx>,
+ st: &layout::Struct,
+ fields: &Vec<Ty<'tcx>>,
+ ix: usize,
+ needs_cast: bool
+ ) -> ValueRef {
+ let fty = fields[ix];
+ let ccx = bcx.ccx;
+
+ let ptr_val = if needs_cast {
+ let fields = st.field_index_by_increasing_offset().map(|i| {
+ type_of::in_memory_type_of(ccx, fields[i])
+ }).collect::<Vec<_>>();
+ let real_ty = Type::struct_(ccx, &fields[..], st.packed);
+ bcx.pointercast(self.llval, real_ty.ptr_to())
+ } else {
+ self.llval
+ };
+
+ // Simple case - we can just GEP the field
+ // * First field - Always aligned properly
+ // * Packed struct - There is no alignment padding
+ // * Field is sized - pointer is properly aligned already
+ if st.offsets[ix] == layout::Size::from_bytes(0) || st.packed ||
+ bcx.ccx.shared().type_is_sized(fty) {
+ return bcx.struct_gep(ptr_val, st.memory_index[ix] as usize);
+ }
+
+ // If the type of the last field is [T] or str, then we don't need to do
+    // any adjustments
+ match fty.sty {
+ ty::TySlice(..) | ty::TyStr => {
+ return bcx.struct_gep(ptr_val, st.memory_index[ix] as usize);
+ }
+ _ => ()
+ }
+
+ // There's no metadata available, log the case and just do the GEP.
+ if !self.has_extra() {
+ debug!("Unsized field `{}`, of `{:?}` has no metadata for adjustment",
+ ix, Value(ptr_val));
+ return bcx.struct_gep(ptr_val, ix);
+ }
+
+ // We need to get the pointer manually now.
+ // We do this by casting to a *i8, then offsetting it by the appropriate amount.
+ // We do this instead of, say, simply adjusting the pointer from the result of a GEP
+ // because the field may have an arbitrary alignment in the LLVM representation
+ // anyway.
+ //
+ // To demonstrate:
+ // struct Foo<T: ?Sized> {
+ // x: u16,
+ // y: T
+ // }
+ //
+ // The type Foo<Foo<Trait>> is represented in LLVM as { u16, { u16, u8 }}, meaning that
+ // the `y` field has 16-bit alignment.
+
+ let meta = self.llextra;
+
+
+ let offset = st.offsets[ix].bytes();
+ let unaligned_offset = C_uint(bcx.ccx, offset);
+
+ // Get the alignment of the field
+ let (_, align) = glue::size_and_align_of_dst(bcx, fty, meta);
+
+ // Bump the unaligned offset up to the appropriate alignment using the
+ // following expression:
+ //
+ // (unaligned offset + (align - 1)) & -align
+
+ // Calculate offset
+ let align_sub_1 = bcx.sub(align, C_uint(bcx.ccx, 1u64));
+ let offset = bcx.and(bcx.add(unaligned_offset, align_sub_1),
+ bcx.neg(align));
+
+ debug!("struct_field_ptr: DST field offset: {:?}", Value(offset));
+
+ // Cast and adjust pointer
+ let byte_ptr = bcx.pointercast(ptr_val, Type::i8p(bcx.ccx));
+ let byte_ptr = bcx.gep(byte_ptr, &[offset]);
+
+ // Finally, cast back to the type expected
+ let ll_fty = type_of::in_memory_type_of(bcx.ccx, fty);
+ debug!("struct_field_ptr: Field type is {:?}", ll_fty);
+ bcx.pointercast(byte_ptr, ll_fty.ptr_to())
+ }
+
+ /// Access a field, at a point when the value's case is known.
+ pub fn trans_field_ptr(self, bcx: &Builder<'a, 'tcx>, ix: usize) -> ValueRef {
+ let discr = match self.ty {
+ LvalueTy::Ty { .. } => 0,
+ LvalueTy::Downcast { variant_index, .. } => variant_index,
+ };
+ let t = self.ty.to_ty(bcx.tcx());
+ let l = bcx.ccx.layout_of(t);
+ // Note: if this ever needs to generate conditionals (e.g., if we
+ // decide to do some kind of cdr-coding-like non-unique repr
+ // someday), it will need to return a possibly-new bcx as well.
+ match *l {
+ layout::Univariant { ref variant, .. } => {
+ assert_eq!(discr, 0);
+ self.struct_field_ptr(bcx, &variant,
+ &adt::compute_fields(bcx.ccx, t, 0, false), ix, false)
+ }
+ layout::Vector { count, .. } => {
+ assert_eq!(discr, 0);
+ assert!((ix as u64) < count);
+ bcx.struct_gep(self.llval, ix)
+ }
+ layout::General { discr: d, ref variants, .. } => {
+ let mut fields = adt::compute_fields(bcx.ccx, t, discr, false);
+ fields.insert(0, d.to_ty(&bcx.tcx(), false));
+ self.struct_field_ptr(bcx, &variants[discr], &fields, ix + 1, true)
+ }
+ layout::UntaggedUnion { .. } => {
+ let fields = adt::compute_fields(bcx.ccx, t, 0, false);
+ let ty = type_of::in_memory_type_of(bcx.ccx, fields[ix]);
+ bcx.pointercast(self.llval, ty.ptr_to())
+ }
+ layout::RawNullablePointer { nndiscr, .. } |
+ layout::StructWrappedNullablePointer { nndiscr, .. } if discr as u64 != nndiscr => {
+ let nullfields = adt::compute_fields(bcx.ccx, t, (1-nndiscr) as usize, false);
+ // The unit-like case might have a nonzero number of unit-like fields.
+            // (e.g., Result of Either with (), as one side.)
+ let ty = type_of::type_of(bcx.ccx, nullfields[ix]);
+ assert_eq!(machine::llsize_of_alloc(bcx.ccx, ty), 0);
+ bcx.pointercast(self.llval, ty.ptr_to())
+ }
+ layout::RawNullablePointer { nndiscr, .. } => {
+ let nnty = adt::compute_fields(bcx.ccx, t, nndiscr as usize, false)[0];
+ assert_eq!(ix, 0);
+ assert_eq!(discr as u64, nndiscr);
+ let ty = type_of::type_of(bcx.ccx, nnty);
+ bcx.pointercast(self.llval, ty.ptr_to())
+ }
+ layout::StructWrappedNullablePointer { ref nonnull, nndiscr, .. } => {
+ assert_eq!(discr as u64, nndiscr);
+ self.struct_field_ptr(bcx, &nonnull,
+ &adt::compute_fields(bcx.ccx, t, discr, false), ix, false)
+ }
+ _ => bug!("element access in type without elements: {} represented as {:#?}", t, l)
+ }
+ }
}
impl<'a, 'tcx> MirContext<'a, 'tcx> {
} else {
LvalueRef::new_unsized(tr_base.llval, tr_base.llextra, tr_base.ty)
};
- let llprojected = adt::trans_field_ptr(bcx, base, field.index());
+ let llprojected = base.trans_field_ptr(bcx, field.index());
(llprojected, base.llextra)
}
mir::ProjectionElem::Index(ref index) => {
substs: self.monomorphize(&substs),
variant_index: disr.0 as usize,
};
- let lldest_i = adt::trans_field_ptr(&bcx, val, field_index);
+ let lldest_i = val.trans_field_ptr(&bcx, field_index);
self.store_operand(&bcx, lldest_i, op, None);
}
}