E0489, // type/lifetime parameter not in scope here
E0490, // a value of type `..` is borrowed for too long
E0495, // cannot infer an appropriate lifetime due to conflicting requirements
- E0566 // conflicting representation hints
+ E0566, // conflicting representation hints
+ E0587, // conflicting packed and align representation hints
}
};
let mut conflicting_reprs = 0;
+ let mut found_packed = false;
+ let mut found_align = false;
+
for word in words {
let name = match word.name() {
("attribute should be applied to struct or union",
"a struct or union")
} else {
+ found_packed = true;
continue
}
}
continue
}
}
+ "align" => {
+ found_align = true;
+ if target != Target::Struct {
+ ("attribute should be applied to struct",
+ "a struct")
+ } else {
+ continue
+ }
+ }
"i8" | "u8" | "i16" | "u16" |
"i32" | "u32" | "i64" | "u64" |
"isize" | "usize" => {
span_warn!(self.sess, attr.span, E0566,
"conflicting representation hints");
}
+ if found_align && found_packed {
+ struct_span_err!(self.sess, attr.span, E0587,
+ "conflicting packed and align representation hints").emit();
+ }
}
fn check_attribute(&self, attr: &ast::Attribute, target: Target) {
/// A structure, a product type in ADT terms.
#[derive(PartialEq, Eq, Hash, Debug)]
pub struct Struct {
+ /// Maximum alignment of fields and repr alignment.
pub align: Align,
+ /// Primitive alignment of fields without repr alignment.
+ pub primitive_align: Align,
+
/// If true, no alignment padding is used.
pub packed: bool,
fn new(dl: &TargetDataLayout, fields: &Vec<&'a Layout>,
repr: &ReprOptions, kind: StructKind,
scapegoat: Ty<'gcx>) -> Result<Struct, LayoutError<'gcx>> {
- let packed = repr.packed();
+ if repr.packed() && repr.align > 0 {
+ bug!("Struct cannot be packed and aligned");
+ }
+
+ let align = if repr.packed() {
+ dl.i8_align
+ } else {
+ dl.aggregate_align
+ };
+
let mut ret = Struct {
- align: if packed { dl.i8_align } else { dl.aggregate_align },
- packed: packed,
+ align: align,
+ primitive_align: align,
+ packed: repr.packed(),
sized: true,
offsets: vec![],
memory_index: vec![],
// Invariant: offset < dl.obj_size_bound() <= 1<<61
if !ret.packed {
let align = field.align(dl);
+ let primitive_align = field.primitive_align(dl);
ret.align = ret.align.max(align);
+ ret.primitive_align = ret.primitive_align.max(primitive_align);
offset = offset.abi_align(align);
}
.map_or(Err(LayoutError::SizeOverflow(scapegoat)), Ok)?;
}
+ if repr.align > 0 {
+ let repr_align = repr.align as u64;
+ ret.align = ret.align.max(Align::from_bytes(repr_align, repr_align).unwrap());
+ debug!("Struct::new repr_align: {:?}", repr_align);
+ }
debug!("Struct::new min_size: {:?}", offset);
ret.min_size = offset;
}
Ok(None)
}
+
+ pub fn over_align(&self) -> Option<u32> {
+ let align = self.align.abi();
+ let primitive_align = self.primitive_align.abi();
+ if align > primitive_align {
+ Some(align as u32)
+ } else {
+ None
+ }
+ }
}
/// An untagged union.
#[derive(PartialEq, Eq, Hash, Debug)]
pub struct Union {
pub align: Align,
+ pub primitive_align: Align,
pub min_size: Size,
impl<'a, 'gcx, 'tcx> Union {
fn new(dl: &TargetDataLayout, packed: bool) -> Union {
+ let align = if packed { dl.i8_align } else { dl.aggregate_align };
Union {
- align: if packed { dl.i8_align } else { dl.aggregate_align },
+ align: align,
+ primitive_align: align,
min_size: Size::from_bytes(0),
packed: packed,
}
if !self.packed {
self.align = self.align.max(field.align(dl));
+ self.primitive_align = self.primitive_align.max(field.primitive_align(dl));
}
self.min_size = cmp::max(self.min_size, field.size(dl));
}
pub fn stride(&self) -> Size {
self.min_size.abi_align(self.align)
}
+
+ pub fn over_align(&self) -> Option<u32> {
+ let align = self.align.abi();
+ let primitive_align = self.primitive_align.abi();
+ if align > primitive_align {
+ Some(align as u32)
+ } else {
+ None
+ }
+ }
}
/// The first half of a fat pointer.
/// If true, the size is exact, otherwise it's only a lower bound.
sized: bool,
align: Align,
+ primitive_align: Align,
element_size: Size,
count: u64
},
discr: Integer,
variants: Vec<Struct>,
size: Size,
- align: Align
+ align: Align,
+ primitive_align: Align,
},
/// Two cases distinguished by a nullable pointer: the case with discriminant
Array {
sized: true,
align: element.align(dl),
+ primitive_align: element.primitive_align(dl),
element_size: element_size,
count: count
}
Array {
sized: false,
align: element.align(dl),
+ primitive_align: element.primitive_align(dl),
element_size: element.size(dl),
count: 0
}
Array {
sized: false,
align: dl.i8_align,
+ primitive_align: dl.i8_align,
element_size: Size::from_bytes(1),
count: 0
}
assert!(discr_max >= 0);
let (min_ity, _) = Integer::repr_discr(tcx, ty, &def.repr, 0, discr_max);
let mut align = dl.aggregate_align;
+ let mut primitive_align = dl.aggregate_align;
let mut size = Size::from_bytes(0);
// We're interested in the smallest alignment, so start large.
}
size = cmp::max(size, st.min_size);
align = align.max(st.align);
+ primitive_align = primitive_align.max(st.primitive_align);
Ok(st)
}).collect::<Result<Vec<_>, _>>()?;
discr: ity,
variants: variants,
size: size,
- align: align
+ align: align,
+ primitive_align: primitive_align
}
}
}
}
+    /// Returns alignment before repr alignment is applied.
+ pub fn primitive_align(&self, dl: &TargetDataLayout) -> Align {
+ match *self {
+ Array { primitive_align, .. } | General { primitive_align, .. } => primitive_align,
+ Univariant { ref variant, .. } |
+ StructWrappedNullablePointer { nonnull: ref variant, .. } => {
+ variant.primitive_align
+ },
+
+ _ => self.align(dl)
+ }
+ }
+
+ /// Returns repr alignment if it is greater than the primitive alignment.
+ pub fn over_align(&self, dl: &TargetDataLayout) -> Option<u32> {
+ let align = self.align(dl);
+ let primitive_align = self.primitive_align(dl);
+ if align.abi() > primitive_align.abi() {
+ Some(align.abi() as u32)
+ } else {
+ None
+ }
+ }
+
pub fn field_offset<C: HasDataLayout>(&self,
cx: C,
i: usize,
use std::borrow::Cow;
use std::cell::{Cell, RefCell, Ref};
use std::collections::BTreeMap;
+use std::cmp;
use std::hash::{Hash, Hasher};
use std::ops::Deref;
use std::rc::Rc;
#[derive(Copy, Clone, Eq, PartialEq, RustcEncodable, RustcDecodable, Default)]
pub struct ReprOptions {
pub int: Option<attr::IntType>,
+ pub align: u16,
pub flags: ReprFlags,
}
impl_stable_hash_for!(struct ReprOptions {
+ align,
int,
flags
});
pub fn new(tcx: TyCtxt, did: DefId) -> ReprOptions {
let mut flags = ReprFlags::empty();
let mut size = None;
-
+ let mut max_align = 0;
for attr in tcx.get_attrs(did).iter() {
for r in attr::find_repr_attrs(tcx.sess.diagnostic(), attr) {
flags.insert(match r {
size = Some(i);
ReprFlags::empty()
},
+ attr::ReprAlign(align) => {
+ max_align = cmp::max(align, max_align);
+ ReprFlags::empty()
+ },
});
}
}
if !tcx.consider_optimizing(|| format!("Reorder fields of {:?}", tcx.item_path_str(did))) {
flags.insert(ReprFlags::IS_LINEAR);
}
- ReprOptions { int: size, flags: flags }
+ ReprOptions { int: size, align: max_align, flags: flags }
}
#[inline]
// bitcasting to the struct type yields invalid cast errors.
// We instead thus allocate some scratch space...
- let llscratch = bcx.alloca(ty, "abi_cast");
+ let llscratch = bcx.alloca(ty, "abi_cast", None);
base::Lifetime::Start.call(bcx, llscratch);
// ...where we first store the value...
/// and fill in the actual contents in a second pass to prevent
/// unbounded recursion; see also the comments in `trans::type_of`.
pub fn type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>) -> Type {
- generic_type_of(cx, t, None, false, false)
+ generic_type_of(cx, t, None, false)
}
pub fn incomplete_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
t: Ty<'tcx>, name: &str) -> Type {
- generic_type_of(cx, t, Some(name), false, false)
+ generic_type_of(cx, t, Some(name), false)
}
pub fn finish_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
_ => unreachable!()
};
let fields = compute_fields(cx, t, nonnull_variant_index as usize, true);
- llty.set_struct_body(&struct_llfields(cx, &fields, nonnull_variant, false, false),
+ llty.set_struct_body(&struct_llfields(cx, &fields, nonnull_variant, false),
packed)
},
_ => bug!("This function cannot handle {} with layout {:#?}", t, l)
fn generic_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
t: Ty<'tcx>,
name: Option<&str>,
- sizing: bool,
- dst: bool) -> Type {
+ sizing: bool) -> Type {
let l = cx.layout_of(t);
- debug!("adt::generic_type_of t: {:?} name: {:?} sizing: {} dst: {}",
- t, name, sizing, dst);
+ debug!("adt::generic_type_of t: {:?} name: {:?} sizing: {}", t, name, sizing);
match *l {
layout::CEnum { discr, .. } => Type::from_integer(cx, discr),
layout::RawNullablePointer { nndiscr, .. } => {
let fields = compute_fields(cx, t, nndiscr as usize, false);
match name {
None => {
- Type::struct_(cx, &struct_llfields(cx, &fields, nonnull, sizing, dst),
+ Type::struct_(cx, &struct_llfields(cx, &fields, nonnull, sizing),
nonnull.packed)
}
Some(name) => {
let fields = compute_fields(cx, t, 0, true);
match name {
None => {
- let fields = struct_llfields(cx, &fields, &variant, sizing, dst);
+ let fields = struct_llfields(cx, &fields, &variant, sizing);
Type::struct_(cx, &fields, variant.packed)
}
Some(name) => {
}
-fn struct_llfields<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, fields: &Vec<Ty<'tcx>>,
+// Double index to account for padding (FieldPath already uses `Struct::memory_index`)
+fn struct_llfields_path(discrfield: &layout::FieldPath) -> Vec<usize> {
+ discrfield.iter().map(|&i| (i as usize) << 1).collect::<Vec<_>>()
+}
+
+
+// Lookup `Struct::memory_index` and double it to account for padding
+pub fn struct_llfields_index(variant: &layout::Struct, index: usize) -> usize {
+ (variant.memory_index[index] as usize) << 1
+}
+
+
+pub fn struct_llfields<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, field_tys: &Vec<Ty<'tcx>>,
variant: &layout::Struct,
- sizing: bool, _dst: bool) -> Vec<Type> {
- let fields = variant.field_index_by_increasing_offset().map(|i| fields[i as usize]);
+ sizing: bool) -> Vec<Type> {
if sizing {
- bug!()
+ bug!();
+ }
+ debug!("struct_llfields: variant: {:?}", variant);
+ let mut first_field = true;
+ let mut min_offset = 0;
+ let mut result: Vec<Type> = Vec::with_capacity(field_tys.len() * 2);
+ let field_iter = variant.field_index_by_increasing_offset().map(|i| {
+ (i, field_tys[i as usize], variant.offsets[i as usize].bytes()) });
+ for (index, ty, target_offset) in field_iter.filter(
+ |&(_, ty, _)| !sizing || cx.shared().type_is_sized(ty)) {
+ if first_field {
+ debug!("struct_llfields: {} ty: {} min_offset: {} target_offset: {}",
+ index, ty, min_offset, target_offset);
+ first_field = false;
+ } else {
+ assert!(target_offset >= min_offset);
+ let padding_bytes = if variant.packed { 0 } else { target_offset - min_offset };
+ result.push(Type::array(&Type::i8(cx), padding_bytes));
+ debug!("struct_llfields: {} ty: {} pad_bytes: {} min_offset: {} target_offset: {}",
+ index, ty, padding_bytes, min_offset, target_offset);
+ }
+ let llty = type_of::in_memory_type_of(cx, ty);
+ result.push(llty);
+ let layout = cx.layout_of(ty);
+ let target_size = layout.size(&cx.tcx().data_layout).bytes();
+ min_offset = target_offset + target_size;
+ }
+ if variant.sized && !field_tys.is_empty() {
+ if variant.stride().bytes() < min_offset {
+ bug!("variant: {:?} stride: {} min_offset: {}", variant, variant.stride().bytes(),
+ min_offset);
+ }
+ let padding_bytes = variant.stride().bytes() - min_offset;
+ debug!("struct_llfields: pad_bytes: {} min_offset: {} min_size: {} stride: {}\n",
+ padding_bytes, min_offset, variant.min_size.bytes(), variant.stride().bytes());
+ result.push(Type::array(&Type::i8(cx), padding_bytes));
+ assert!(result.len() == (field_tys.len() * 2));
} else {
- fields.map(|ty| type_of::in_memory_type_of(cx, ty)).collect()
+ debug!("struct_llfields: min_offset: {} min_size: {} stride: {}\n",
+ min_offset, variant.min_size.bytes(), variant.stride().bytes());
}
+
+ result
}
pub fn is_discr_signed<'tcx>(l: &layout::Layout) -> bool {
scrutinee: ValueRef,
alignment: Alignment,
) -> ValueRef {
- let llptrptr = bcx.gepi(scrutinee,
- &discrfield.iter().map(|f| *f as usize).collect::<Vec<_>>());
+ let path = struct_llfields_path(discrfield);
+ let llptrptr = bcx.gepi(scrutinee, &path);
let llptr = bcx.load(llptrptr, alignment.to_align());
let cmp = if nndiscr == 0 { IntEQ } else { IntNE };
bcx.icmp(cmp, llptr, C_null(val_ty(llptr)))
let align = C_i32(bcx.ccx, nonnull.align.abi() as i32);
base::call_memset(bcx, llptr, fill_byte, size, align, false);
} else {
- let path = discrfield.iter().map(|&i| i as usize).collect::<Vec<_>>();
+ let path = struct_llfields_path(discrfield);
let llptrptr = bcx.gepi(val, &path);
let llptrty = val_ty(llptrptr).element_type();
bcx.store(C_null(llptrty), llptrptr, None);
}
}
- pub fn alloca(&self, ty: Type, name: &str) -> ValueRef {
+ pub fn alloca(&self, ty: Type, name: &str, align: Option<u32>) -> ValueRef {
let builder = Builder::with_ccx(self.ccx);
builder.position_at_start(unsafe {
llvm::LLVMGetFirstBasicBlock(self.llfn())
});
- builder.dynamic_alloca(ty, name)
+ builder.dynamic_alloca(ty, name, align)
}
- pub fn dynamic_alloca(&self, ty: Type, name: &str) -> ValueRef {
+ pub fn dynamic_alloca(&self, ty: Type, name: &str, align: Option<u32>) -> ValueRef {
self.count_insn("alloca");
unsafe {
- if name.is_empty() {
+ let alloca = if name.is_empty() {
llvm::LLVMBuildAlloca(self.llbuilder, ty.to_ref(), noname())
} else {
let name = CString::new(name).unwrap();
llvm::LLVMBuildAlloca(self.llbuilder, ty.to_ref(),
name.as_ptr())
+ };
+ if let Some(align) = align {
+ llvm::LLVMSetAlignment(alloca, align as c_uint);
}
+ alloca
}
}
//
// More information can be found in libstd's seh.rs implementation.
let i64p = Type::i64(ccx).ptr_to();
- let slot = bcx.alloca(i64p, "slot");
+ let slot = bcx.alloca(i64p, "slot", None);
bcx.invoke(func, &[data], normal.llbb(), catchswitch.llbb(),
None);
use rustc::ty::layout::{self, LayoutTyper};
use rustc::mir;
use abi::{Abi, FnType, ArgType};
+use adt;
use base::{self, Lifetime};
use callee;
use builder::Builder;
};
let llslot = match op.val {
Immediate(_) | Pair(..) => {
- let llscratch = bcx.alloca(ret.memory_ty(bcx.ccx), "ret");
+ let llscratch = bcx.alloca(ret.memory_ty(bcx.ccx), "ret", None);
self.store_operand(&bcx, llscratch, None, op);
llscratch
}
let (mut llval, align, by_ref) = match op.val {
Immediate(_) | Pair(..) => {
if arg.is_indirect() || arg.cast.is_some() {
- let llscratch = bcx.alloca(arg.memory_ty(bcx.ccx), "arg");
+ let llscratch = bcx.alloca(arg.memory_ty(bcx.ccx), "arg", None);
self.store_operand(bcx, llscratch, None, op);
(llscratch, Alignment::AbiAligned, true)
} else {
// think that ATM (Rust 1.16) we only pass temporaries, but we shouldn't
// have scary latent bugs around.
- let llscratch = bcx.alloca(arg.memory_ty(bcx.ccx), "arg");
+ let llscratch = bcx.alloca(arg.memory_ty(bcx.ccx), "arg", None);
base::memcpy_ty(bcx, llscratch, llval, op.ty, Some(1));
(llscratch, Alignment::AbiAligned, true)
}
bug!("Not a tuple.");
};
for (n, &ty) in arg_types.iter().enumerate() {
- let mut elem = bcx.extract_value(llval, v.memory_index[n] as usize);
+ let mut elem = bcx.extract_value(
+ llval, adt::struct_llfields_index(v, n));
// Truncate bools to i1, if needed
if ty.is_bool() && common::val_ty(elem) != Type::i1(bcx.ccx) {
elem = bcx.trunc(elem, Type::i1(bcx.ccx));
slot
} else {
let llretty = Type::struct_(ccx, &[Type::i8p(ccx), Type::i32(ccx)], false);
- let slot = bcx.alloca(llretty, "personalityslot");
+ let slot = bcx.alloca(llretty, "personalityslot", None);
self.llpersonalityslot = Some(slot);
slot
}
pub fn alloca(bcx: &Builder<'a, 'tcx>, ty: Ty<'tcx>, name: &str) -> LvalueRef<'tcx> {
debug!("alloca({:?}: {:?})", name, ty);
- let tmp = bcx.alloca(type_of::type_of(bcx.ccx, ty), name);
+ let tmp = bcx.alloca(
+ type_of::type_of(bcx.ccx, ty), name, bcx.ccx.over_align_of(ty));
assert!(!ty.has_param_types());
Self::new_sized_ty(tmp, ty, Alignment::AbiAligned)
}
let alignment = self.alignment | Alignment::from_packed(st.packed);
+ let llfields = adt::struct_llfields(ccx, fields, st, false);
let ptr_val = if needs_cast {
- let fields = st.field_index_by_increasing_offset().map(|i| {
- type_of::in_memory_type_of(ccx, fields[i])
- }).collect::<Vec<_>>();
- let real_ty = Type::struct_(ccx, &fields[..], st.packed);
+ let real_ty = Type::struct_(ccx, &llfields[..], st.packed);
bcx.pointercast(self.llval, real_ty.ptr_to())
} else {
self.llval
// * Field is sized - pointer is properly aligned already
if st.offsets[ix] == layout::Size::from_bytes(0) || st.packed ||
bcx.ccx.shared().type_is_sized(fty) {
- return (bcx.struct_gep(ptr_val, st.memory_index[ix] as usize), alignment);
+ return (bcx.struct_gep(
+ ptr_val, adt::struct_llfields_index(st, ix)), alignment);
}
// If the type of the last field is [T] or str, then we don't need to do
// any adjusments
match fty.sty {
ty::TySlice(..) | ty::TyStr => {
- return (bcx.struct_gep(ptr_val, st.memory_index[ix] as usize), alignment);
+ return (bcx.struct_gep(
+ ptr_val, adt::struct_llfields_index(st, ix)), alignment);
}
_ => ()
}
if !self.has_extra() {
debug!("Unsized field `{}`, of `{:?}` has no metadata for adjustment",
ix, Value(ptr_val));
- return (bcx.struct_gep(ptr_val, ix), alignment);
+ return (bcx.struct_gep(ptr_val, adt::struct_llfields_index(st, ix)), alignment);
}
// We need to get the pointer manually now.
// doesn't actually strip the offset when splitting the closure
// environment into its components so it ends up out of bounds.
let env_ptr = if !env_ref {
- let alloc = bcx.alloca(common::val_ty(llval), "__debuginfo_env_ptr");
+ let alloc = bcx.alloca(common::val_ty(llval), "__debuginfo_env_ptr", None);
bcx.store(llval, alloc, None);
alloc
} else {
use rustc::mir::tcx::LvalueTy;
use rustc_data_structures::indexed_vec::Idx;
+use adt;
use base;
use common::{self, CrateContext, C_null};
use builder::Builder;
if common::val_ty(elem) == Type::i1(bcx.ccx) {
elem = bcx.zext(elem, Type::i8(bcx.ccx));
}
+ let layout = bcx.ccx.layout_of(self.ty);
+ let i = if let Layout::Univariant { ref variant, .. } = *layout {
+ adt::struct_llfields_index(variant, i)
+ } else {
+ i
+ };
llpair = bcx.insert_value(llpair, elem, i);
}
self.val = OperandValue::Immediate(llpair);
let (lldata, llextra) = base::load_fat_ptr(bcx, llval, align, ty);
OperandValue::Pair(lldata, llextra)
} else if common::type_is_imm_pair(bcx.ccx, ty) {
- let f_align = match *bcx.ccx.layout_of(ty) {
- Layout::Univariant { ref variant, .. } =>
- Alignment::from_packed(variant.packed) | align,
- _ => align
+ let (ix0, ix1, f_align) = match *bcx.ccx.layout_of(ty) {
+ Layout::Univariant { ref variant, .. } => {
+ (adt::struct_llfields_index(variant, 0),
+ adt::struct_llfields_index(variant, 1),
+ Alignment::from_packed(variant.packed) | align)
+ },
+ _ => (0, 1, align)
};
let [a_ty, b_ty] = common::type_pair_fields(bcx.ccx, ty).unwrap();
- let a_ptr = bcx.struct_gep(llval, 0);
- let b_ptr = bcx.struct_gep(llval, 1);
+ let a_ptr = bcx.struct_gep(llval, ix0);
+ let b_ptr = bcx.struct_gep(llval, ix1);
OperandValue::Pair(
base::load_ty(bcx, a_ptr, f_align, a_ty),
bcx.store(base::from_immediate(bcx, s), lldest, align);
}
OperandValue::Pair(a, b) => {
- let f_align = match *bcx.ccx.layout_of(operand.ty) {
- Layout::Univariant { ref variant, .. } if variant.packed => {
- Some(1)
+ let (ix0, ix1, f_align) = match *bcx.ccx.layout_of(operand.ty) {
+ Layout::Univariant { ref variant, .. } => {
+ (adt::struct_llfields_index(variant, 0),
+ adt::struct_llfields_index(variant, 1),
+ if variant.packed { Some(1) } else { None })
}
- _ => align
+ _ => (0, 1, align)
};
let a = base::from_immediate(bcx, a);
let b = base::from_immediate(bcx, b);
- bcx.store(a, bcx.struct_gep(lldest, 0), f_align);
- bcx.store(b, bcx.struct_gep(lldest, 1), f_align);
+ bcx.store(a, bcx.struct_gep(lldest, ix0), f_align);
+ bcx.store(b, bcx.struct_gep(lldest, ix1), f_align);
}
}
}
_ => {
// If this is a tuple or closure, we need to translate GEP indices.
let layout = bcx.ccx.layout_of(dest.ty.to_ty(bcx.tcx()));
- let translation = if let Layout::Univariant { ref variant, .. } = *layout {
- Some(&variant.memory_index)
- } else {
- None
+ let get_memory_index = |i| {
+ if let Layout::Univariant { ref variant, .. } = *layout {
+ adt::struct_llfields_index(variant, i)
+ } else {
+ i
+ }
};
let alignment = dest.alignment;
for (i, operand) in operands.iter().enumerate() {
// Note: perhaps this should be StructGep, but
// note that in some cases the values here will
// not be structs but arrays.
- let i = if let Some(ref t) = translation {
- t[i] as usize
- } else {
- i
- };
+ let i = get_memory_index(i);
let dest = bcx.gepi(dest.llval, &[0, i]);
self.store_operand(&bcx, dest, alignment.to_align(), op);
}
pub fn size_of(&self, ty: Ty<'tcx>) -> machine::llsize {
self.layout_of(ty).size(self).bytes() as machine::llsize
}
+
+ pub fn over_align_of(&self, t: Ty<'tcx>)
+ -> Option<machine::llalign> {
+ let layout = self.layout_of(t);
+ if let Some(align) = layout.over_align(&self.tcx().data_layout) {
+ Some(align as machine::llalign)
+ } else {
+ None
+ }
+ }
}
fn llvm_type_name<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>) -> String {
if def.repr.simd() {
check_simd(tcx, span, def_id);
}
+
+ // if struct is packed and not aligned, check fields for alignment.
+ // Checks for combining packed and align attrs on single struct are done elsewhere.
+ if tcx.lookup_adt_def(def_id).repr.packed() && tcx.lookup_adt_def(def_id).repr.align == 0 {
+ check_packed(tcx, span, def_id);
+ }
}
fn check_union<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
}
}
+fn check_packed<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, sp: Span, def_id: DefId) {
+ if check_packed_inner(tcx, def_id, &mut Vec::new()) {
+ struct_span_err!(tcx.sess, sp, E0588,
+ "packed struct cannot transitively contain a `[repr(align)]` struct").emit();
+ }
+}
+
+fn check_packed_inner<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
+ def_id: DefId,
+ stack: &mut Vec<DefId>) -> bool {
+ let t = tcx.item_type(def_id);
+ if stack.contains(&def_id) {
+ debug!("check_packed_inner: {:?} is recursive", t);
+ return false;
+ }
+ match t.sty {
+ ty::TyAdt(def, substs) if def.is_struct() => {
+ if tcx.lookup_adt_def(def.did).repr.align > 0 {
+ return true;
+ }
+ // push struct def_id before checking fields
+ stack.push(def_id);
+ for field in &def.struct_variant().fields {
+ let f = field.ty(tcx, substs);
+ match f.sty {
+ ty::TyAdt(def, _) => {
+ if check_packed_inner(tcx, def.did, stack) {
+ return true;
+ }
+ }
+ _ => ()
+ }
+ }
+ // only need to pop if not early out
+ stack.pop();
+ }
+ _ => ()
+ }
+ false
+}
+
#[allow(trivial_numeric_casts)]
pub fn check_enum<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
sp: Span,
// but `{}` was found in the type `{}`
E0567, // auto traits can not have type parameters
E0568, // auto-traits can not have predicates,
+ E0588, // packed struct cannot transitively contain a `[repr(align)]` struct
E0592, // duplicate definitions with name `{}`
}
self.meta_item().and_then(|meta_item| meta_item.value_str())
}
+ /// Returns a name and single literal value tuple of the MetaItem.
+ pub fn name_value_literal(&self) -> Option<(Name, &Lit)> {
+ self.meta_item().and_then(
+ |meta_item| meta_item.meta_item_list().and_then(
+ |meta_item_list| {
+ if meta_item_list.len() == 1 {
+ let nested_item = &meta_item_list[0];
+ if nested_item.is_literal() {
+ Some((meta_item.name(), nested_item.literal().unwrap()))
+ } else {
+ None
+ }
+ }
+ else {
+ None
+ }}))
+ }
+
/// Returns a MetaItem if self is a MetaItem with Kind Word.
pub fn word(&self) -> Option<&MetaItem> {
self.meta_item().and_then(|meta_item| if meta_item.is_word() {
continue
}
+ let mut recognised = false;
if let Some(mi) = item.word() {
let word = &*mi.name().as_str();
let hint = match word {
_ => match int_type_of_word(word) {
Some(ity) => Some(ReprInt(ity)),
None => {
- // Not a word we recognize
- span_err!(diagnostic, item.span, E0552,
- "unrecognized representation hint");
None
}
}
};
if let Some(h) = hint {
+ recognised = true;
acc.push(h);
}
- } else {
- span_err!(diagnostic, item.span, E0553,
- "unrecognized enum representation hint");
+ } else if let Some((name, value)) = item.name_value_literal() {
+ if name == "align" {
+ recognised = true;
+ let mut valid_align = false;
+ if let ast::LitKind::Int(align, ast::LitIntType::Unsuffixed) = value.node {
+ if align.is_power_of_two() {
+ // rustc::ty::layout::Align restricts align to <= 32768
+ if align <= 32768 {
+ acc.push(ReprAlign(align as u16));
+ valid_align = true;
+ }
+ }
+ }
+ if !valid_align {
+ span_err!(diagnostic, item.span, E0589,
+ "align representation must be a u16 power of two");
+ }
+ }
+ }
+ if !recognised {
+ // Not a word we recognize
+ span_err!(diagnostic, item.span, E0552,
+ "unrecognized representation hint");
}
}
}
ReprExtern,
ReprPacked,
ReprSimd,
+ ReprAlign(u16),
}
#[derive(Eq, Hash, PartialEq, Debug, RustcEncodable, RustcDecodable, Copy, Clone)]
E0550, // multiple deprecated attributes
E0551, // incorrect meta item
E0552, // unrecognized representation hint
- E0553, // unrecognized enum representation hint
E0554, // #[feature] may not be used on the [] release channel
E0555, // malformed feature attribute, expected #![feature(...)]
E0556, // malformed feature, expected just one word
E0557, // feature has been removed
E0584, // file for module `..` found at both .. and ..
+ E0589, // align representation must be a u16 power of two
}
for a in type_attrs {
for r in &attr::find_repr_attrs(diagnostic, a) {
repr_type_name = match *r {
- attr::ReprPacked | attr::ReprSimd => continue,
+ attr::ReprPacked | attr::ReprSimd | attr::ReprAlign(_) => continue,
attr::ReprExtern => "i32",
attr::ReprInt(attr::SignedInt(ast::IntTy::Is)) => "isize",
// except according to those terms.
#![allow(dead_code)]
+#![feature(attr_literals)]
#![feature(repr_simd)]
#[repr(C)] //~ ERROR: attribute should be applied to struct, enum or union
#[repr(C)]
enum EExtern { A, B }
+#[repr(align(8))] //~ ERROR: attribute should be applied to struct
+enum EAlign { A, B }
+
#[repr(packed)] //~ ERROR: attribute should be applied to struct
enum EPacked { A, B }
// option. This file may not be copied, modified, or distributed
// except according to those terms.
-#![feature(rustc_attrs)]
#![allow(dead_code)]
+#![feature(attr_literals)]
#[repr(C)]
enum A { A }
#[repr(C, packed)]
struct E(i32);
-#[rustc_error]
-fn main() {} //~ ERROR compilation successful
+#[repr(packed, align(8))] //~ ERROR conflicting packed and align representation hints
+struct F(i32);
+
+fn main() {}
--- /dev/null
+// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+#![allow(dead_code)]
+#![feature(attr_literals)]
+
+#[repr(align(16.0))] //~ ERROR: align representation must be a u16 power of two
+struct A(i32);
+
+#[repr(align(15))] //~ ERROR: align representation must be a u16 power of two
+struct B(i32);
+
+#[repr(align(65536))] //~ ERROR: align representation must be a u16 power of two
+struct C(i32);
+
+fn main() {}
--- /dev/null
+// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+#![feature(attr_literals)]
+#![allow(dead_code)]
+
+#[repr(align(16))]
+struct A(i32);
+
+struct B(A);
+
+#[repr(packed)]
+struct C(A); //~ ERROR: packed struct cannot transitively contain a `[repr(align)]` struct
+
+#[repr(packed)]
+struct D(B); //~ ERROR: packed struct cannot transitively contain a `[repr(align)]` struct
+
+fn main() {}
--- /dev/null
+// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+#![feature(attr_literals)]
+
+use std::mem;
+
+// Raising alignment
+#[repr(align(16))]
+struct Align16(i32);
+
+// Lowering has no effect
+#[repr(align(1))]
+struct Align1(i32);
+
+// Multiple attributes take the max
+#[repr(align(4))]
+#[repr(align(16))]
+#[repr(align(8))]
+struct AlignMany(i32);
+
+// Raising alignment may not alter size.
+#[repr(align(8))]
+#[allow(dead_code)]
+struct Align8Many {
+ a: i32,
+ b: i32,
+ c: i32,
+ d: u8,
+}
+
+enum Enum {
+ #[allow(dead_code)]
+ A(i32),
+ B(Align16)
+}
+
+// Nested alignment - use `#[repr(C)]` to suppress field reordering for sizeof test
+#[repr(C)]
+struct Nested {
+ a: i32,
+ b: i32,
+ c: Align16,
+ d: i8,
+}
+
+#[repr(packed)]
+struct Packed(i32);
+
+#[repr(align(16))]
+struct AlignContainsPacked {
+ a: Packed,
+ b: Packed,
+}
+
+impl Align16 {
+ // return aligned type
+ pub fn new(i: i32) -> Align16 {
+ Align16(i)
+ }
+ // pass aligned type
+ pub fn consume(a: Align16) -> i32 {
+ a.0
+ }
+}
+
+const CONST_ALIGN16: Align16 = Align16(7);
+static STATIC_ALIGN16: Align16 = Align16(8);
+
+// Check the actual address is aligned
+fn is_aligned_to<T>(p: &T, align: usize) -> bool {
+ let addr = p as *const T as usize;
+ (addr & (align - 1)) == 0
+}
+
+pub fn main() {
+ // check alignment and size by type and value
+ assert_eq!(mem::align_of::<Align16>(), 16);
+ assert_eq!(mem::size_of::<Align16>(), 16);
+
+ let a = Align16(7);
+ assert_eq!(a.0, 7);
+ assert_eq!(mem::align_of_val(&a), 16);
+ assert_eq!(mem::size_of_val(&a), 16);
+
+ assert!(is_aligned_to(&a, 16));
+
+ // lowering should have no effect
+ assert_eq!(mem::align_of::<Align1>(), 4);
+ assert_eq!(mem::size_of::<Align1>(), 4);
+ let a = Align1(7);
+ assert_eq!(a.0, 7);
+ assert_eq!(mem::align_of_val(&a), 4);
+ assert_eq!(mem::size_of_val(&a), 4);
+ assert!(is_aligned_to(&a, 4));
+
+ // when multiple attributes are specified the max should be used
+ assert_eq!(mem::align_of::<AlignMany>(), 16);
+ assert_eq!(mem::size_of::<AlignMany>(), 16);
+ let a = AlignMany(7);
+ assert_eq!(a.0, 7);
+ assert_eq!(mem::align_of_val(&a), 16);
+ assert_eq!(mem::size_of_val(&a), 16);
+ assert!(is_aligned_to(&a, 16));
+
+ // raising alignment should not reduce size
+ assert_eq!(mem::align_of::<Align8Many>(), 8);
+ assert_eq!(mem::size_of::<Align8Many>(), 16);
+ let a = Align8Many { a: 1, b: 2, c: 3, d: 4 };
+ assert_eq!(a.a, 1);
+ assert_eq!(mem::align_of_val(&a), 8);
+ assert_eq!(mem::size_of_val(&a), 16);
+ assert!(is_aligned_to(&a, 8));
+
+ // return type
+ let a = Align16::new(1);
+ assert_eq!(mem::align_of_val(&a), 16);
+ assert_eq!(mem::size_of_val(&a), 16);
+ assert_eq!(a.0, 1);
+ assert!(is_aligned_to(&a, 16));
+ assert_eq!(Align16::consume(a), 1);
+
+ // check const alignment, size and value
+ assert_eq!(mem::align_of_val(&CONST_ALIGN16), 16);
+ assert_eq!(mem::size_of_val(&CONST_ALIGN16), 16);
+ assert_eq!(CONST_ALIGN16.0, 7);
+ assert!(is_aligned_to(&CONST_ALIGN16, 16));
+
+ // check global static alignment, size and value
+ assert_eq!(mem::align_of_val(&STATIC_ALIGN16), 16);
+ assert_eq!(mem::size_of_val(&STATIC_ALIGN16), 16);
+ assert_eq!(STATIC_ALIGN16.0, 8);
+ assert!(is_aligned_to(&STATIC_ALIGN16, 16));
+
+ // Note that the size of Nested may change if struct field re-ordering is enabled
+ assert_eq!(mem::align_of::<Nested>(), 16);
+ assert_eq!(mem::size_of::<Nested>(), 48);
+ let a = Nested{ a: 1, b: 2, c: Align16(3), d: 4};
+ assert_eq!(mem::align_of_val(&a), 16);
+ assert_eq!(mem::align_of_val(&a.b), 4);
+ assert_eq!(mem::align_of_val(&a.c), 16);
+ assert_eq!(mem::size_of_val(&a), 48);
+ assert!(is_aligned_to(&a, 16));
+ // check the correct fields are indexed
+ assert_eq!(a.a, 1);
+ assert_eq!(a.b, 2);
+ assert_eq!(a.c.0, 3);
+ assert_eq!(a.d, 4);
+
+ // enum should be aligned to max alignment
+ assert_eq!(mem::align_of::<Enum>(), 16);
+ assert_eq!(mem::align_of_val(&Enum::B(Align16(0))), 16);
+ let e = Enum::B(Align16(15));
+ match e {
+ Enum::B(ref a) => {
+ assert_eq!(a.0, 15);
+ assert_eq!(mem::align_of_val(a), 16);
+ assert_eq!(mem::size_of_val(a), 16);
+ },
+ _ => ()
+ }
+ assert!(is_aligned_to(&e, 16));
+
+ // arrays of aligned elements should also be aligned
+ assert_eq!(mem::align_of::<[Align16;2]>(), 16);
+ assert_eq!(mem::size_of::<[Align16;2]>(), 32);
+
+ let a = [Align16(0), Align16(1)];
+ assert_eq!(mem::align_of_val(&a[0]), 16);
+ assert_eq!(mem::align_of_val(&a[1]), 16);
+ assert!(is_aligned_to(&a, 16));
+
+ // check heap value is aligned
+ assert_eq!(mem::align_of_val(Box::new(Align16(0)).as_ref()), 16);
+
+ // check heap array is aligned
+ let a = vec!(Align16(0), Align16(1));
+ assert_eq!(mem::align_of_val(&a[0]), 16);
+ assert_eq!(mem::align_of_val(&a[1]), 16);
+
+ assert_eq!(mem::align_of::<AlignContainsPacked>(), 16);
+ assert_eq!(mem::size_of::<AlignContainsPacked>(), 16);
+ let a = AlignContainsPacked { a: Packed(1), b: Packed(2) };
+ assert_eq!(mem::align_of_val(&a), 16);
+ assert_eq!(mem::align_of_val(&a.a), 1);
+ assert_eq!(mem::align_of_val(&a.b), 1);
+ assert_eq!(mem::size_of_val(&a), 16);
+ assert!(is_aligned_to(&a, 16));
+}
--- /dev/null
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// compile-flags: -Z print-type-sizes
+
+// This file illustrates how padding is handled: alignment
+// requirements can lead to the introduction of padding, either before
+// fields or at the end of the structure as a whole.
+//
+// It avoids using u64/i64 because on some targets that is only 4-byte
+// aligned (while on most it is 8-byte aligned) and so the resulting
+// padding and overall computed sizes can be quite different.
+#![feature(attr_literals)]
+#![allow(dead_code)]
+
+#[repr(align(16))]
+#[derive(Default)]
+struct A(i32);
+
+enum E {
+ A(i32),
+ B(A)
+}
+
+#[derive(Default)]
+struct S {
+ a: i32,
+ b: i32,
+ c: A,
+ d: i8,
+}
+
+fn main() {
+ let _s: S = Default::default();
+}
--- /dev/null
+print-type-size type: `E`: 32 bytes, alignment: 16 bytes
+print-type-size discriminant: 4 bytes
+print-type-size variant `A`: 4 bytes
+print-type-size field `.0`: 4 bytes
+print-type-size variant `B`: 28 bytes
+print-type-size padding: 12 bytes
+print-type-size field `.0`: 16 bytes, alignment: 16 bytes
+print-type-size type: `S`: 32 bytes, alignment: 16 bytes
+print-type-size field `.c`: 16 bytes
+print-type-size field `.a`: 4 bytes
+print-type-size field `.b`: 4 bytes
+print-type-size field `.d`: 1 bytes
+print-type-size end padding: 7 bytes
+print-type-size type: `A`: 16 bytes, alignment: 16 bytes
+print-type-size field `.0`: 4 bytes
+print-type-size end padding: 12 bytes