so rename it `new_sized_aligned`.
6 of the 11 call sites pass `align = layout.align.abi`.
`from_const_alloc` passes `alloc.align` instead, but that value is `assert_eq!`-ed against `layout.align.abi`, so it is effectively the default as well.
Only 4 of the 11 call sites pass an `align` that actually differs from `layout.align.abi`.
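As a sketch of what the split looks like at a call site (illustrative `llval`/`layout`/`align` locals, not taken from any one hunk below):

// Before: every caller passed an alignment, usually just the default.
PlaceRef::new_sized(llval, layout, layout.align.abi);
// After: `new_sized` derives the alignment from the layout...
PlaceRef::new_sized(llval, layout);
// ...and `new_sized_aligned` is for the callers that really have a different one.
PlaceRef::new_sized_aligned(llval, layout, align);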
let align = dest.align.restrict_for_offset(dest.layout.field(self.cx(), 0).size);
cg_elem.val.store(&mut body_bx,
- PlaceRef::new_sized(current, cg_elem.layout, align));
+ PlaceRef::new_sized_aligned(current, cg_elem.layout, align));
let next = body_bx.inbounds_gep(current, &[self.const_usize(1)]);
body_bx.br(header_bx.llbb());
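(This is one of the 4 interesting call sites: `restrict_for_offset` can return an alignment smaller than `layout.align.abi`, so the explicit `align` stays and the call switches to `new_sized_aligned`.)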
)};
self.const_bitcast(llval, llty)
};
- PlaceRef::new_sized(llval, layout, alloc.align)
+ PlaceRef::new_sized(llval, layout)
}
fn const_ptrcast(&self, val: &'ll Value, ty: &'ll Type) -> &'ll Value {
let name = &*tcx.item_name(def_id).as_str();
let llret_ty = self.layout_of(ret_ty).llvm_type(self);
- let result = PlaceRef::new_sized(llresult, fn_ty.ret.layout, fn_ty.ret.layout.align.abi);
+ let result = PlaceRef::new_sized(llresult, fn_ty.ret.layout);
let simple = get_simple_intrinsic(self, name);
let llval = match name {
// Handle both by-ref and immediate tuples.
if let Ref(llval, None, align) = tuple.val {
- let tuple_ptr = PlaceRef::new_sized(llval, tuple.layout, align);
+ let tuple_ptr = PlaceRef::new_sized_aligned(llval, tuple.layout, align);
for i in 0..tuple.layout.fields.count() {
let field_ptr = tuple_ptr.project_field(bx, i);
let field = bx.load_operand(field_ptr);
let llty = bx.backend_type(src.layout);
let cast_ptr = bx.pointercast(dst.llval, bx.type_ptr_to(llty));
let align = src.layout.align.abi.min(dst.align);
- src.val.store(bx, PlaceRef::new_sized(cast_ptr, src.layout, align));
+ src.val.store(bx, PlaceRef::new_sized_aligned(cast_ptr, src.layout, align));
}
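(Same story here: the alignment is the minimum of the source layout's ABI alignment and the destination's, which may be smaller than `src.layout.align.abi`, so this site also keeps its computed `align` via `new_sized_aligned`.)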
if local == mir::RETURN_PLACE && fx.fn_ty.ret.is_indirect() {
debug!("alloc: {:?} (return place) -> place", local);
let llretptr = bx.get_param(0);
- LocalRef::Place(PlaceRef::new_sized(llretptr, layout, layout.align.abi))
+ LocalRef::Place(PlaceRef::new_sized(llretptr, layout))
} else if memory_locals.contains(local) {
debug!("alloc: {:?} -> place", local);
if layout.is_unsized() {
let llarg = bx.get_param(llarg_idx);
bx.set_value_name(llarg, &name);
llarg_idx += 1;
- PlaceRef::new_sized(llarg, arg.layout, arg.layout.align.abi)
+ PlaceRef::new_sized(llarg, arg.layout)
} else if arg.is_unsized_indirect() {
// As the storage for the indirect argument lives for the whole
// function call, we just copy the fat pointer.
bx.load_operand(PlaceRef::new_sized(
bx.cx().const_undef(bx.cx().type_ptr_to(bx.cx().backend_type(layout))),
layout,
- layout.align.abi,
))
})
}
pub fn new_sized(
llval: V,
layout: TyLayout<'tcx>,
+ ) -> PlaceRef<'tcx, V> {
+ assert!(!layout.is_unsized());
+ PlaceRef {
+ llval,
+ llextra: None,
+ layout,
+ align: layout.align.abi,
+ }
+ }
+
+ pub fn new_sized_aligned(
+ llval: V,
+ layout: TyLayout<'tcx>,
align: Align,
) -> PlaceRef<'tcx, V> {
assert!(!layout.is_unsized());
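With both constructors in place, the old three-argument form and the new pair relate as follows (a sketch; the rest of `new_sized_aligned` mirrors `new_sized` but stores the caller-supplied `align`):

// These two calls are equivalent:
PlaceRef::new_sized(llval, layout)
PlaceRef::new_sized_aligned(llval, layout, layout.align.abi)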
debug!("alloca({:?}: {:?})", name, layout);
assert!(!layout.is_unsized(), "tried to statically allocate unsized place");
let tmp = bx.alloca(bx.cx().backend_type(layout), name, layout.align.abi);
- Self::new_sized(tmp, layout, layout.align.abi)
+ Self::new_sized(tmp, layout)
}
/// Returns a place for an indirect reference to an unsized place.
let llval = bx.cx().const_undef(
bx.cx().type_ptr_to(bx.cx().backend_type(layout))
);
- PlaceRef::new_sized(llval, layout, layout.align.abi)
+ PlaceRef::new_sized(llval, layout)
}
}
}
scratch.storage_dead(&mut bx);
}
OperandValue::Ref(llref, None, align) => {
- let source = PlaceRef::new_sized(llref, operand.layout, align);
+ let source = PlaceRef::new_sized_aligned(llref, operand.layout, align);
base::coerce_unsized_into(&mut bx, source, dest);
}
OperandValue::Ref(_, Some(_), _) => {