val: MatchInput)
-> ExtractedBlock<'blk, 'tcx> {
let _icx = push_ctxt("match::extract_variant_args");
+ // Assume enums are always sized for now.
+ let val = adt::MaybeSizedValue::sized(val.val);
let args = (0..adt::num_args(repr, disr_val)).map(|i| {
- adt::trans_field_ptr(bcx, repr, val.val, disr_val, i)
+ adt::trans_field_ptr(bcx, repr, val, disr_val, i)
}).collect();
ExtractedBlock { vals: args, bcx: bcx }
(arg_count - 1, Load(bcx, expr::get_dataptr(bcx, val.val)))
};
let mut field_vals: Vec<ValueRef> = (0..arg_count).map(|ix|
- adt::trans_field_ptr(bcx, &*repr, struct_val, 0, ix)
+ // By definition, these are all sized
+ adt::trans_field_ptr(bcx, &*repr, adt::MaybeSizedValue::sized(struct_val), 0, ix)
).collect();
match left_ty.sty {
monomorphize::field_ty(bcx.tcx(), substs, field)
}).unwrap();
let scratch = alloc_ty(bcx, unsized_ty, "__struct_field_fat_ptr");
+
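+ // The struct's last field is unsized, so the value lives behind a fat
+ // pointer; load the metadata and pair it with the struct pointer so that
+ // trans_field_ptr can compute the field's dynamic offset.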
+ let meta = Load(bcx, expr::get_meta(bcx, val.val));
+ let struct_val = adt::MaybeSizedValue::unsized_(struct_val, meta);
+
let data = adt::trans_field_ptr(bcx, &*repr, struct_val, 0, arg_count);
- let len = Load(bcx, expr::get_meta(bcx, val.val));
Store(bcx, data, expr::get_dataptr(bcx, scratch));
- Store(bcx, len, expr::get_meta(bcx, scratch));
+ Store(bcx, meta, expr::get_meta(bcx, scratch));
field_vals.push(scratch);
}
_ => {}
val: MatchInput,
cleanup_scope: cleanup::ScopeId)
-> Block<'blk, 'tcx> {
- debug!("bind_irrefutable_pat(bcx={}, pat={:?})",
+ debug!("bind_irrefutable_pat(bcx={}, pat={:?}, val={})",
bcx.to_str(),
- pat);
+ pat,
+ bcx.val_to_string(val.val));
if bcx.sess().asm_comments() {
add_comment(bcx, &format!("bind_irrefutable_pat(pat={:?})",
Some(ref elems) => {
// This is the tuple struct case.
let repr = adt::represent_node(bcx, pat.id);
+ let val = adt::MaybeSizedValue::sized(val.val);
for (i, elem) in elems.iter().enumerate() {
let fldptr = adt::trans_field_ptr(bcx, &*repr,
- val.val, 0, i);
+ val, 0, i);
bcx = bind_irrefutable_pat(
bcx,
&**elem,
let pat_ty = node_id_type(bcx, pat.id);
let pat_repr = adt::represent_type(bcx.ccx(), pat_ty);
let pat_v = VariantInfo::of_node(tcx, pat_ty, pat.id);
+
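+ // If the pattern's type is unsized, `val.val` holds a fat pointer;
+ // split it into the data pointer and its metadata before accessing
+ // the fields.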
+ let val = if type_is_sized(tcx, pat_ty) {
+ adt::MaybeSizedValue::sized(val.val)
+ } else {
+ let data = Load(bcx, expr::get_dataptr(bcx, val.val));
+ let meta = Load(bcx, expr::get_meta(bcx, val.val));
+ adt::MaybeSizedValue::unsized_(data, meta)
+ };
+
for f in fields {
let name = f.node.name;
- let fldptr = adt::trans_field_ptr(
+ let field_idx = pat_v.field_index(name);
+ let mut fldptr = adt::trans_field_ptr(
bcx,
&*pat_repr,
- val.val,
+ val,
pat_v.discr,
- pat_v.field_index(name));
+ field_idx);
+
+ let fty = pat_v.fields[field_idx].1;
+ // If the field is unsized, construct a fat pointer to it instead
+ // of a thin one.
+ if !type_is_sized(tcx, fty) {
+ let scratch = alloc_ty(bcx, fty, "__struct_field_fat_ptr");
+ debug!("Creating fat pointer {}", bcx.val_to_string(scratch));
+ Store(bcx, fldptr, expr::get_dataptr(bcx, scratch));
+ Store(bcx, val.meta, expr::get_meta(bcx, scratch));
+ fldptr = scratch;
+ }
bcx = bind_irrefutable_pat(bcx,
&*f.node.pat,
MatchInput::from_val(fldptr),
}
hir::PatTup(ref elems) => {
let repr = adt::represent_node(bcx, pat.id);
+ let val = adt::MaybeSizedValue::sized(val.val);
for (i, elem) in elems.iter().enumerate() {
- let fldptr = adt::trans_field_ptr(bcx, &*repr, val.val, 0, i);
+ let fldptr = adt::trans_field_ptr(bcx, &*repr, val, 0, i);
bcx = bind_irrefutable_pat(
bcx,
&**elem,
}
}
hir::PatBox(ref inner) => {
- let llbox = Load(bcx, val.val);
+ let pat_ty = node_id_type(bcx, inner.id);
+ // Don't load DSTs; instead, pass along a fat pointer
+ let val = if type_is_sized(tcx, pat_ty) {
+ Load(bcx, val.val)
+ } else {
+ val.val
+ };
bcx = bind_irrefutable_pat(
- bcx, &**inner, MatchInput::from_val(llbox), cleanup_scope);
+ bcx, &**inner, MatchInput::from_val(val), cleanup_scope);
}
hir::PatRegion(ref inner, _) => {
- let loaded_val = Load(bcx, val.val);
+ let pat_ty = node_id_type(bcx, inner.id);
+ // Don't load DSTs; instead, pass along a fat pointer
+ let val = if type_is_sized(tcx, pat_ty) {
+ Load(bcx, val.val)
+ } else {
+ val.val
+ };
bcx = bind_irrefutable_pat(
bcx,
&**inner,
- MatchInput::from_val(loaded_val),
+ MatchInput::from_val(val),
cleanup_scope);
}
hir::PatVec(ref before, ref slice, ref after) => {
pub use self::Repr::*;
+use std;
use std::rc::Rc;
use llvm::{ValueRef, True, IntEQ, IntNE};
use trans::common::*;
use trans::datum;
use trans::debuginfo::DebugLoc;
+use trans::glue;
use trans::machine;
use trans::monomorphize;
use trans::type_::Type;
pub fields: Vec<Ty<'tcx>>,
}
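+/// A pointer to a value, paired with the metadata that sizes the value when
+/// it is unsized (a slice length or a vtable pointer). For sized values,
+/// `meta` is null.
+///
+/// A minimal usage sketch, mirroring the call sites in this patch:
+///
+///     let v = MaybeSizedValue::sized(llval);
+///     let fld = trans_field_ptr(bcx, &repr, v, discr, 0);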
+#[derive(Copy, Clone)]
+pub struct MaybeSizedValue {
+ pub value: ValueRef,
+ pub meta: ValueRef,
+}
+
+impl MaybeSizedValue {
+ pub fn sized(value: ValueRef) -> MaybeSizedValue {
+ MaybeSizedValue {
+ value: value,
+ meta: std::ptr::null_mut()
+ }
+ }
+
+ pub fn unsized_(value: ValueRef, meta: ValueRef) -> MaybeSizedValue {
+ MaybeSizedValue {
+ value: value,
+ meta: meta
+ }
+ }
+
+ pub fn has_meta(&self) -> bool {
+ !self.meta.is_null()
+ }
+}
+
/// Convenience for `represent_type`. There should probably be more or
/// these, for places in trans where the `Ty` isn't directly
/// available.
monomorphize::field_ty(cx.tcx(), substs, field)
}).collect::<Vec<_>>();
let packed = cx.tcx().lookup_packed(def.did);
- let dtor = def.dtor_kind().has_drop_flag();
+ // FIXME(16758) don't add a drop flag to unsized structs, as it
+ // won't actually be in the location we say it is because it'll be after
+ // the unsized field. Several other pieces of code assume that the unsized
+ // field is definitely the last one.
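+ // e.g. for `struct S<T: ?Sized> { x: u8, tail: T }`, the flag would be
+ // appended after `tail`, whose size is only known at runtime, so the
+ // flag's statically computed offset would be wrong.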
+ let dtor = def.dtor_kind().has_drop_flag() && type_is_sized(cx.tcx(), t);
if dtor {
ftys.push(cx.tcx().dtor_type());
}
}
General(ity, ref cases, dtor) => {
if dtor_active(dtor) {
- let ptr = trans_field_ptr(bcx, r, val, discr,
+ let ptr = trans_field_ptr(bcx, r, MaybeSizedValue::sized(val), discr,
cases[discr as usize].fields.len() - 2);
Store(bcx, C_u8(bcx.ccx(), DTOR_NEEDED), ptr);
}
/// Access a field, at a point when the value's case is known.
pub fn trans_field_ptr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, r: &Repr<'tcx>,
- val: ValueRef, discr: Disr, ix: usize) -> ValueRef {
+ val: MaybeSizedValue, discr: Disr, ix: usize) -> ValueRef {
// Note: if this ever needs to generate conditionals (e.g., if we
// decide to do some kind of cdr-coding-like non-unique repr
// someday), it will need to return a possibly-new bcx as well.
assert_eq!(machine::llsize_of_alloc(bcx.ccx(), ty), 0);
// The contents of memory at this pointer can't matter, but use
// the value that's "reasonable" in case of pointer comparison.
- PointerCast(bcx, val, ty.ptr_to())
+ PointerCast(bcx, val.value, ty.ptr_to())
}
RawNullablePointer { nndiscr, nnty, .. } => {
assert_eq!(ix, 0);
assert_eq!(discr, nndiscr);
let ty = type_of::type_of(bcx.ccx(), nnty);
- PointerCast(bcx, val, ty.ptr_to())
+ PointerCast(bcx, val.value, ty.ptr_to())
}
StructWrappedNullablePointer { ref nonnull, nndiscr, .. } => {
assert_eq!(discr, nndiscr);
}
}
-pub fn struct_field_ptr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, st: &Struct<'tcx>, val: ValueRef,
+pub fn struct_field_ptr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, st: &Struct<'tcx>, val: MaybeSizedValue,
ix: usize, needs_cast: bool) -> ValueRef {
- let val = if needs_cast {
- let ccx = bcx.ccx();
- let fields = st.fields.iter().map(|&ty| type_of::type_of(ccx, ty)).collect::<Vec<_>>();
+ let ccx = bcx.ccx();
+ let ptr_val = if needs_cast {
+ let fields = st.fields.iter().map(|&ty| {
+ type_of::in_memory_type_of(ccx, ty)
+ }).collect::<Vec<_>>();
let real_ty = Type::struct_(ccx, &fields[..], st.packed);
- PointerCast(bcx, val, real_ty.ptr_to())
+ PointerCast(bcx, val.value, real_ty.ptr_to())
} else {
- val
+ val.value
};
- StructGEP(bcx, val, ix)
+ let fty = st.fields[ix];
+ // Simple cases where we can just GEP the field:
+ // * First field - always aligned properly
+ // * Packed struct - there is no alignment padding
+ // * Sized field - the pointer is already properly aligned
+ if ix == 0 || st.packed || type_is_sized(bcx.tcx(), fty) {
+ return StructGEP(bcx, ptr_val, ix);
+ }
+
+ // If the type of the last field is [T] or str, then we don't need to do
+ // any adjustments
+ match fty.sty {
+ ty::TySlice(..) | ty::TyStr => {
+ return StructGEP(bcx, ptr_val, ix);
+ }
+ _ => ()
+ }
+
+ // There's no metadata available; log the case and just do the GEP.
+ if !val.has_meta() {
+ debug!("Unsized field `{}`, of `{}` has no metadata for adjustment",
+ ix,
+ bcx.val_to_string(ptr_val));
+ return StructGEP(bcx, ptr_val, ix);
+ }
+
+ let dbloc = DebugLoc::None;
+
+ // We need to compute the field pointer manually: cast to a *i8, then
+ // offset it by the appropriate amount. We can't simply adjust the result
+ // of a GEP, because the field may have an arbitrary alignment in the
+ // LLVM representation anyway.
+ //
+ // To demonstrate:
+ // struct Foo<T: ?Sized> {
+ // x: u16,
+ // y: T
+ // }
+ //
+ // The type Foo<Foo<Trait>> is represented in LLVM as { u16, { u16, u8 }}, meaning that
+ // the `y` field has 16-bit alignment.
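+ //
+ // The alignment of `y`'s concrete type is only known at runtime (for a
+ // trait object it comes from the vtable), which is why the offset is
+ // computed from the metadata below.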
+
+ let meta = val.meta;
+
+ // Calculate the unaligned offset of the unsized field.
+ let mut offset = 0;
+ for &ty in &st.fields[0..ix] {
+ let llty = type_of::sizing_type_of(ccx, ty);
+ let type_align = type_of::align_of(ccx, ty);
+ offset = roundup(offset, type_align);
+ offset += machine::llsize_of_alloc(ccx, llty);
+ }
+ let unaligned_offset = C_uint(bcx.ccx(), offset);
+
+ // Get the alignment of the field
+ let (_, align) = glue::size_and_align_of_dst(bcx, fty, meta);
+
+ // Bump the unaligned offset up to the appropriate alignment using the
+ // following expression:
+ //
+ // (unaligned offset + (align - 1)) & -align
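+ //
+ // e.g. with unaligned offset 2 and align 4: (2 + 3) & -4 == 4.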
+
+ // Calculate offset
+ let align_sub_1 = Sub(bcx, align, C_uint(bcx.ccx(), 1u64), dbloc);
+ let offset = And(bcx,
+ Add(bcx, unaligned_offset, align_sub_1, dbloc),
+ Neg(bcx, align, dbloc),
+ dbloc);
+
+ debug!("struct_field_ptr: DST field offset: {}",
+ bcx.val_to_string(offset));
+
+ // Cast and adjust pointer
+ let byte_ptr = PointerCast(bcx, ptr_val, Type::i8p(bcx.ccx()));
+ let byte_ptr = GEP(bcx, byte_ptr, &[offset]);
+
+ // Finally, cast back to the type expected
+ let ll_fty = type_of::in_memory_type_of(bcx.ccx(), fty);
+ debug!("struct_field_ptr: Field type is {}", ll_fty.to_string());
+ PointerCast(bcx, byte_ptr, ll_fty.ptr_to())
}
pub fn fold_variants<'blk, 'tcx, F>(bcx: Block<'blk, 'tcx>,
cleanup::CustomScope(custom_cleanup_scope), (), |_, bcx, _| bcx
));
bcx = fold_variants(bcx, r, val, |variant_cx, st, value| {
- let ptr = struct_field_ptr(variant_cx, st, value, (st.fields.len() - 1), false);
+ let ptr = struct_field_ptr(variant_cx, st, MaybeSizedValue::sized(value),
+ (st.fields.len() - 1), false);
datum::Datum::new(ptr, ptr_ty, datum::Lvalue::new("adt::trans_drop_flag_ptr"))
.store_to(variant_cx, scratch.val)
});
fn iter_variant<'blk, 'tcx, F>(cx: Block<'blk, 'tcx>,
repr: &adt::Repr<'tcx>,
- av: ValueRef,
+ av: adt::MaybeSizedValue,
variant: ty::VariantDef<'tcx>,
substs: &Substs<'tcx>,
f: &mut F)
return cx;
}
- let (data_ptr, info) = if common::type_is_sized(cx.tcx(), t) {
- (av, None)
+ let value = if common::type_is_sized(cx.tcx(), t) {
+ adt::MaybeSizedValue::sized(av)
} else {
- let data = expr::get_dataptr(cx, av);
- let info = expr::get_meta(cx, av);
- (Load(cx, data), Some(Load(cx, info)))
+ let data = Load(cx, expr::get_dataptr(cx, av));
+ let info = Load(cx, expr::get_meta(cx, av));
+ adt::MaybeSizedValue::unsized_(data, info)
};
let mut cx = cx;
let repr = adt::represent_type(cx.ccx(), t);
let VariantInfo { fields, discr } = VariantInfo::from_ty(cx.tcx(), t, None);
for (i, &Field(_, field_ty)) in fields.iter().enumerate() {
- let llfld_a = adt::trans_field_ptr(cx, &*repr, data_ptr, discr, i);
+ let llfld_a = adt::trans_field_ptr(cx, &*repr, value, discr, i);
let val = if common::type_is_sized(cx.tcx(), field_ty) {
llfld_a
} else {
let scratch = datum::rvalue_scratch_datum(cx, field_ty, "__fat_ptr_iter");
Store(cx, llfld_a, expr::get_dataptr(cx, scratch.val));
- Store(cx, info.unwrap(), expr::get_meta(cx, scratch.val));
+ Store(cx, value.meta, expr::get_meta(cx, scratch.val));
scratch.val
};
cx = f(cx, val, field_ty);
ty::TyClosure(_, ref substs) => {
let repr = adt::represent_type(cx.ccx(), t);
for (i, upvar_ty) in substs.upvar_tys.iter().enumerate() {
- let llupvar = adt::trans_field_ptr(cx, &*repr, data_ptr, 0, i);
+ let llupvar = adt::trans_field_ptr(cx, &*repr, value, 0, i);
cx = f(cx, llupvar, upvar_ty);
}
}
ty::TyArray(_, n) => {
- let (base, len) = tvec::get_fixed_base_and_len(cx, data_ptr, n);
+ let (base, len) = tvec::get_fixed_base_and_len(cx, value.value, n);
let unit_ty = t.sequence_element_type(cx.tcx());
cx = tvec::iter_vec_raw(cx, base, unit_ty, len, f);
}
ty::TySlice(_) | ty::TyStr => {
let unit_ty = t.sequence_element_type(cx.tcx());
- cx = tvec::iter_vec_raw(cx, data_ptr, unit_ty, info.unwrap(), f);
+ cx = tvec::iter_vec_raw(cx, value.value, unit_ty, value.meta, f);
}
ty::TyTuple(ref args) => {
let repr = adt::represent_type(cx.ccx(), t);
for (i, arg) in args.iter().enumerate() {
- let llfld_a = adt::trans_field_ptr(cx, &*repr, data_ptr, 0, i);
+ let llfld_a = adt::trans_field_ptr(cx, &*repr, value, 0, i);
cx = f(cx, llfld_a, *arg);
}
}
(_match::Single, None) => {
if n_variants != 0 {
assert!(n_variants == 1);
- cx = iter_variant(cx, &*repr, av, &en.variants[0], substs, &mut f);
+ cx = iter_variant(cx, &*repr, adt::MaybeSizedValue::sized(av),
+ &en.variants[0], substs, &mut f);
}
}
(_match::Switch, Some(lldiscrim_a)) => {
AddCase(llswitch, case_val, variant_cx.llbb);
let variant_cx = iter_variant(variant_cx,
&*repr,
- data_ptr,
+ value,
variant,
substs,
&mut f);
_ => bcx.sess().bug("struct has non-univariant repr"),
};
+ let src = adt::MaybeSizedValue::sized(src);
+ let dst = adt::MaybeSizedValue::sized(dst);
+
let iter = src_fields.iter().zip(dst_fields).enumerate();
for (i, (src_fty, dst_fty)) in iter {
if type_is_zero_size(bcx.ccx(), dst_fty) {
if !type_is_zero_size(fcx.ccx, result_ty.unwrap()) {
let dest = fcx.get_ret_slot(bcx, result_ty, "eret_slot");
+ // Functions can't return unsized values, so the return slot is sized.
+ let dest_val = adt::MaybeSizedValue::sized(dest);
let repr = adt::represent_type(ccx, result_ty.unwrap());
let mut llarg_idx = fcx.arg_offset() as c_uint;
for (i, arg_ty) in arg_tys.into_iter().enumerate() {
- let lldestptr = adt::trans_field_ptr(bcx, &*repr, dest, disr, i);
+ let lldestptr = adt::trans_field_ptr(bcx, &*repr, dest_val, disr, i);
if common::type_is_fat_ptr(bcx.tcx(), arg_ty) {
Store(bcx,
get_param(fcx.llfn, llarg_idx),
.collect::<Vec<String>>()
.join(", "));
+ let mut fn_ty = val_ty(llfn);
+ // Strip off pointers
+ while fn_ty.kind() == llvm::TypeKind::Pointer {
+ fn_ty = fn_ty.element_type();
+ }
+
+ assert!(fn_ty.kind() == llvm::TypeKind::Function,
+ "builder::call not passed a function");
+
+ let param_tys = fn_ty.func_params();
+
+ let iter = param_tys.into_iter()
+ .zip(args.iter().map(|&v| val_ty(v)));
+ for (i, (expected_ty, actual_ty)) in iter.enumerate() {
+ if expected_ty != actual_ty {
+ self.ccx.sess().bug(
+ &format!(
+ "Type mismatch in function call of {}. Expected {} for param {}, got {}",
+ self.ccx.tn().val_to_string(llfn),
+ self.ccx.tn().type_to_string(expected_ty),
+ i,
+ self.ccx.tn().type_to_string(actual_ty)));
+ }
+ }
+
unsafe {
let v = llvm::LLVMBuildCall(self.llbuilder, llfn, args.as_ptr(),
args.len() as c_uint, noname());
// Create the closure.
for (i, freevar) in freevars.iter().enumerate() {
let datum = expr::trans_local_var(bcx, freevar.def);
- let upvar_slot_dest = adt::trans_field_ptr(bcx, &*repr, dest_addr, 0, i);
+ let upvar_slot_dest = adt::trans_field_ptr(
+ bcx, &*repr, adt::MaybeSizedValue::sized(dest_addr), 0, i);
let upvar_id = ty::UpvarId { var_id: freevar.def.var_id(),
closure_expr_id: id };
match tcx.upvar_capture(upvar_id).unwrap() {
pub fn get_element<'blk, F>(&self, bcx: Block<'blk, 'tcx>, ty: Ty<'tcx>,
gep: F)
-> Datum<'tcx, Lvalue> where
- F: FnOnce(ValueRef) -> ValueRef,
+ F: FnOnce(adt::MaybeSizedValue) -> ValueRef,
{
let val = if type_is_sized(bcx.tcx(), self.ty) {
- gep(self.val)
+ let val = adt::MaybeSizedValue::sized(self.val);
+ gep(val)
} else {
- gep(Load(bcx, expr::get_dataptr(bcx, self.val)))
+ let val = adt::MaybeSizedValue::unsized_(
+ Load(bcx, expr::get_dataptr(bcx, self.val)),
+ Load(bcx, expr::get_meta(bcx, self.val)));
+ gep(val)
};
Datum {
val: val,
};
assert!(coerce_index < src_fields.len() && src_fields.len() == target_fields.len());
+ let source_val = adt::MaybeSizedValue::sized(source.val);
+ let target_val = adt::MaybeSizedValue::sized(target.val);
+
let iter = src_fields.iter().zip(target_fields).enumerate();
for (i, (src_ty, target_ty)) in iter {
- let ll_source = adt::trans_field_ptr(bcx, &repr_source, source.val, 0, i);
- let ll_target = adt::trans_field_ptr(bcx, &repr_target, target.val, 0, i);
+ let ll_source = adt::trans_field_ptr(bcx, &repr_source, source_val, 0, i);
+ let ll_target = adt::trans_field_ptr(bcx, &repr_target, target_val, 0, i);
// If this is the field we need to coerce, recurse on it.
if i == coerce_index {
let d = base_datum.get_element(
bcx,
vinfo.fields[ix].1,
- |srcval| adt::trans_field_ptr(bcx, &*repr, srcval, vinfo.discr, ix));
+ |srcval| {
+ adt::trans_field_ptr(bcx, &*repr, srcval, vinfo.discr, ix)
+ });
if type_is_sized(bcx.tcx(), d.ty) {
DatumBlock { datum: d.to_expr_datum(), bcx: bcx }
// Second, trans the base to the dest.
assert_eq!(discr, 0);
+ let addr = adt::MaybeSizedValue::sized(addr);
match expr_kind(bcx.tcx(), &*base.expr) {
ExprKind::RvalueDps | ExprKind::RvalueDatum if !bcx.fcx.type_needs_drop(ty) => {
- bcx = trans_into(bcx, &*base.expr, SaveIn(addr));
+ bcx = trans_into(bcx, &*base.expr, SaveIn(addr.value));
},
ExprKind::RvalueStmt => {
bcx.tcx().sess.bug("unexpected expr kind for struct base expr")
}
} else {
// No base means we can write all fields directly in place.
+ let addr = adt::MaybeSizedValue::sized(addr);
for &(i, ref e) in fields {
let dest = adt::trans_field_ptr(bcx, &*repr, addr, discr, i);
let e_ty = expr_ty_adjusted(bcx, &**e);
//
// Code relating to drop glue.
+use std;
use back::link::*;
use llvm;
// Choose max of two known alignments (combined value must
// be aligned according to more restrictive of the two).
- let align = Select(bcx,
- ICmp(bcx,
- llvm::IntUGT,
- sized_align,
- unsized_align,
- dbloc),
- sized_align,
- unsized_align);
+ let align = match (const_to_opt_uint(sized_align), const_to_opt_uint(unsized_align)) {
+ (Some(sized_align), Some(unsized_align)) => {
+ // If both alignments are constant (sized_align always should be), then
+ // pick the larger one statically.
+ C_uint(ccx, std::cmp::max(sized_align, unsized_align))
+ }
+ _ => Select(bcx,
+ ICmp(bcx,
+ llvm::IntUGT,
+ sized_align,
+ unsized_align,
+ dbloc),
+ sized_align,
+ unsized_align)
+ };
// Issue #27023: must add any necessary padding to `size`
// (to make it a multiple of `align`) before returning it.
//
// emulated via the semi-standard fast bit trick:
//
- // `(size + (align-1)) & !align`
+ // `(size + (align-1)) & -align`
let addend = Sub(bcx, align, C_uint(bcx.ccx(), 1_u64), dbloc);
let size = And(
let repr = adt::represent_type(bcx.ccx(), arg_type);
let repr_ptr = &*repr;
+ let arg = adt::MaybeSizedValue::sized(llarg);
(0..contents.len())
.map(|i| {
- Load(bcx, adt::trans_field_ptr(bcx, repr_ptr, llarg, 0, i))
+ Load(bcx, adt::trans_field_ptr(bcx, repr_ptr, arg, 0, i))
})
.collect()
}
LvalueTy::Downcast { adt_def: _, substs: _, variant_index: v } => v,
};
let discr = discr as u64;
- (adt::trans_field_ptr(bcx, &base_repr, tr_base.llval, discr, field.index()),
- if common::type_is_sized(tcx, projected_ty.to_ty(tcx)) {
+ let is_sized = common::type_is_sized(tcx, projected_ty.to_ty(tcx));
+ let base = if is_sized {
+ adt::MaybeSizedValue::sized(tr_base.llval)
+ } else {
+ adt::MaybeSizedValue::unsized_(tr_base.llval, tr_base.llextra)
+ };
+ (adt::trans_field_ptr(bcx, &base_repr, base, discr, field.index()),
+ if is_sized {
ptr::null_mut()
} else {
tr_base.llextra
--- /dev/null
+// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+struct Foo<T: ?Sized> {
+ a: u16,
+ b: T
+}
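+
+// When `T` is unsized, the offset of `b` is only known at runtime: the
+// padding after `a` depends on the concrete type's alignment.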
+
+trait Bar {
+ fn get(&self) -> usize;
+}
+
+impl Bar for usize {
+ fn get(&self) -> usize { *self }
+}
+
+struct Baz<T: ?Sized> {
+ a: T
+}
+
+#[repr(packed)]
+struct Packed<T: ?Sized> {
+ a: u8,
+ b: T
+}
+
+struct HasDrop<T: ?Sized> {
+ ptr: Box<usize>,
+ data: T
+}
+
+fn main() {
+ // Test that zero-offset works properly
+ let b : Baz<usize> = Baz { a: 7 };
+ assert_eq!(b.a.get(), 7);
+ let b : &Baz<Bar> = &b;
+ assert_eq!(b.a.get(), 7);
+
+ // Test that the field is aligned properly
+ let f : Foo<usize> = Foo { a: 0, b: 11 };
+ assert_eq!(f.b.get(), 11);
+ let ptr1 : *const u8 = &f.b as *const _ as *const u8;
+
+ let f : &Foo<Bar> = &f;
+ let ptr2 : *const u8 = &f.b as *const _ as *const u8;
+ assert_eq!(f.b.get(), 11);
+
+ // The pointers should be the same
+ assert_eq!(ptr1, ptr2);
+
+ // Test that packed structs are handled correctly
+ let p : Packed<usize> = Packed { a: 0, b: 13 };
+ assert_eq!(p.b.get(), 13);
+ let p : &Packed<Bar> = &p;
+ assert_eq!(p.b.get(), 13);
+
+ // Test that nested DSTs work properly
+ let f : Foo<Foo<usize>> = Foo { a: 0, b: Foo { a: 1, b: 17 }};
+ assert_eq!(f.b.b.get(), 17);
+ let f : &Foo<Foo<Bar>> = &f;
+ assert_eq!(f.b.b.get(), 17);
+
+ // Test that getting the pointer via destructuring works
+
+ let f : Foo<usize> = Foo { a: 0, b: 11 };
+ let f : &Foo<Bar> = &f;
+ let &Foo { a: _, b: ref bar } = f;
+ assert_eq!(bar.get(), 11);
+
+ // Make sure that drop flags don't screw things up
+
+ let d : HasDrop<Baz<[i32; 4]>> = HasDrop {
+ ptr: Box::new(0),
+ data: Baz { a: [1,2,3,4] }
+ };
+ assert_eq!([1,2,3,4], d.data.a);
+
+ let d : &HasDrop<Baz<[i32]>> = &d;
+ assert_eq!(&[1,2,3,4], &d.data.a);
+}