// We instead thus allocate some scratch space...
let scratch_size = cast.size(bx);
let scratch_align = cast.align(bx);
// The alloca is created unnamed; value naming (if any) is done separately.
let llscratch = bx.alloca(cast.llvm_type(bx), scratch_align);
bx.lifetime_start(llscratch, scratch_size);
// ...where we first store the value...
)
}
- fn alloca(&mut self, ty: &'ll Type, name: &str, align: Align) -> &'ll Value {
+ fn alloca(&mut self, ty: &'ll Type, align: Align) -> &'ll Value {
let mut bx = Builder::with_cx(self.cx);
bx.position_at_start(unsafe {
llvm::LLVMGetFirstBasicBlock(self.llfn())
});
- bx.dynamic_alloca(ty, name, align)
+ bx.dynamic_alloca(ty, align)
}
/// Emits an `alloca` at the current insertion point and sets its
/// alignment. The alloca is deliberately unnamed (`UNNAMED`); callers
/// that want a value name attach one separately.
fn dynamic_alloca(&mut self, ty: &'ll Type, align: Align) -> &'ll Value {
    unsafe {
        let alloca = llvm::LLVMBuildAlloca(self.llbuilder, ty, UNNAMED);
        llvm::LLVMSetAlignment(alloca, align.bytes() as c_uint);
        alloca
    }
/// Emits an array `alloca` of `len` elements of type `ty` with the
/// given alignment. Like `dynamic_alloca`, the result is unnamed.
fn array_alloca(&mut self,
                ty: &'ll Type,
                len: &'ll Value,
                align: Align) -> &'ll Value {
    unsafe {
        let alloca = llvm::LLVMBuildArrayAlloca(self.llbuilder, ty, len, UNNAMED);
        llvm::LLVMSetAlignment(alloca, align.bytes() as c_uint);
        alloca
    }
// More information can be found in libstd's seh.rs implementation.
let i64p = bx.type_ptr_to(bx.type_i64());
let ptr_align = bx.tcx().data_layout.pointer_align.abi;
// Pointer-aligned slot for the SEH personality data; unnamed alloca.
let slot = bx.alloca(i64p, ptr_align);
bx.invoke(func, &[data], normal.llbb(), catchswitch.llbb(), None);
normal.ret(bx.const_i32(0));
let llslot = match op.val {
    Immediate(_) | Pair(..) => {
        // Spill the immediate return value to a stack slot so it can
        // be handled uniformly as a memory location.
        let scratch =
            PlaceRef::alloca(&mut bx, self.fn_ty.ret.layout);
        op.val.store(&mut bx, scratch);
        scratch.llval
    }
match (arg, op.val) {
    (&mir::Operand::Copy(_), Ref(_, None, _)) |
    (&mir::Operand::Constant(_), Ref(_, None, _)) => {
        // Copy the by-ref operand into a fresh temporary so the callee
        // cannot observe writes through the original reference.
        let tmp = PlaceRef::alloca(&mut bx, op.layout);
        op.val.store(&mut bx, tmp);
        op.val = Ref(tmp.llval, None, tmp.align);
    }
Immediate(_) | Pair(..) => {
    match arg.mode {
        PassMode::Indirect(..) | PassMode::Cast(_) => {
            // Indirect / cast passing needs a memory location: spill
            // the immediate to a scratch slot and pass its address.
            let scratch = PlaceRef::alloca(bx, arg.layout);
            op.val.store(bx, scratch);
            (scratch.llval, scratch.align, true)
        }
// think that ATM (Rust 1.16) we only pass temporaries, but we shouldn't
// have scary latent bugs around.
- let scratch = PlaceRef::alloca(bx, arg.layout, "arg");
+ let scratch = PlaceRef::alloca(bx, arg.layout);
base::memcpy_ty(bx, scratch.llval, scratch.align, llval, align,
op.layout, MemFlags::empty());
(scratch.llval, scratch.align, true)
cx.tcx().mk_mut_ptr(cx.tcx().types.u8),
cx.tcx().types.i32
]));
// One slot is shared by all landing pads of the function; it is
// cached so repeated calls reuse the same alloca.
let slot = PlaceRef::alloca(bx, layout);
self.personality_slot = Some(slot);
slot
}
return if fn_ret.is_indirect() {
    // Odd, but possible, case, we have an operand temporary,
    // but the calling convention has an indirect return.
    let tmp = PlaceRef::alloca(bx, fn_ret.layout);
    tmp.storage_live(bx);
    llargs.push(tmp.llval);
    ReturnDest::IndirectOperand(tmp, index)
    // Currently, intrinsics always need a location to store
    // the result, so we create a temporary `alloca` for the
    // result.
    let tmp = PlaceRef::alloca(bx, fn_ret.layout);
    tmp.storage_live(bx);
    ReturnDest::IndirectOperand(tmp, index)
} else {
LocalRef::Operand(None) => {
    let dst_layout = bx.layout_of(self.monomorphized_place_ty(&dst.as_ref()));
    assert!(!dst_layout.ty.has_erasable_regions());
    // Transmute via a stack temporary: store the source, then reload
    // it at the destination layout.
    let place = PlaceRef::alloca(bx, dst_layout);
    place.storage_live(bx);
    self.codegen_transmute_into(bx, src, place);
    let op = bx.load_operand(place);
DirectOperand(index) => {
    // If there is a cast, we have to store and reload.
    let op = if let PassMode::Cast(_) = ret_ty.mode {
        let tmp = PlaceRef::alloca(bx, ret_ty.layout);
        tmp.storage_live(bx);
        bx.store_arg_ty(&ret_ty, llval, tmp);
        let op = bx.load_operand(tmp);
debug!("alloc: {:?} ({}) -> place", local, name);
if layout.is_unsized() {
    let indirect_place =
        PlaceRef::alloca_unsized_indirect(&mut bx, layout);
    // Name the value after the user variable, now that allocas
    // themselves are created unnamed.
    bx.set_var_name(indirect_place.llval, name);
    // FIXME: add an appropriate debuginfo
    LocalRef::UnsizedPlace(indirect_place)
} else {
    let place = PlaceRef::alloca(&mut bx, layout);
    bx.set_var_name(place.llval, name);
    if dbg {
        let (scope, span) = fx.debug_loc(mir::SourceInfo {
            span: decl.source_info.span,
} else if memory_locals.contains(local) {
    debug!("alloc: {:?} -> place", local);
    if layout.is_unsized() {
        let indirect_place = PlaceRef::alloca_unsized_indirect(&mut bx, layout);
        // Unnamed MIR locals get their `Debug` rendering as value name.
        bx.set_var_name(indirect_place.llval, format_args!("{:?}", local));
        LocalRef::UnsizedPlace(indirect_place)
    } else {
        let place = PlaceRef::alloca(&mut bx, layout);
        bx.set_var_name(place.llval, format_args!("{:?}", local));
        LocalRef::Place(place)
    }
} else {
    // If this is an immediate local, we do not create an
_ => bug!("spread argument isn't a tuple?!")
};
// Place for the tuple that collects the spread arguments.
let place = PlaceRef::alloca(bx, bx.layout_of(arg_ty));
bx.set_var_name(place.llval, name);
for i in 0..tupled_arg_tys.len() {
let arg = &fx.fn_ty.args[idx];
idx += 1;
llarg_idx += 1;
let indirect_operand = OperandValue::Pair(llarg, llextra);
// Unsized indirect argument: spill the (ptr, extra) pair into an
// indirect place, then name the resulting value.
let tmp = PlaceRef::alloca_unsized_indirect(bx, arg.layout);
bx.set_var_name(tmp.llval, name);
indirect_operand.store(bx, tmp);
tmp
} else {
    let tmp = PlaceRef::alloca(bx, arg.layout);
    bx.set_var_name(tmp.llval, name);
    if fx.fn_ty.c_variadic && last_arg_idx.map(|idx| arg_index == idx).unwrap_or(false) {
        let va_list_did = match tcx.lang_items().va_list() {
            Some(did) => did,
// Allocate an appropriate region on the stack, and copy the value into it
let (llsize, _) = glue::size_and_align_of_dst(bx, unsized_ty, Some(llextra));
let lldst = bx.array_alloca(bx.cx().type_i8(), llsize, max_align);
bx.memcpy(lldst, max_align, llptr, min_align, llsize, flags);
// Store the allocated region and the extra to the indirect place.
pub fn alloca<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
bx: &mut Bx,
layout: TyLayout<'tcx>,
- name: &str
) -> Self {
- debug!("alloca({:?}: {:?})", name, layout);
assert!(!layout.is_unsized(), "tried to statically allocate unsized place");
- let tmp = bx.alloca(bx.cx().backend_type(layout), name, layout.align.abi);
+ let tmp = bx.alloca(bx.cx().backend_type(layout), layout.align.abi);
Self::new_sized(tmp, layout)
}
/// Creates a place for an unsized value by allocating a slot that
/// holds a *pointer* to the value (`*mut layout.ty`) rather than the
/// value itself.
///
/// Panics (via `assert!`) if `layout` is sized.
pub fn alloca_unsized_indirect<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
    bx: &mut Bx,
    layout: TyLayout<'tcx>,
) -> Self {
    assert!(layout.is_unsized(), "tried to allocate indirect place for sized values");
    let ptr_ty = bx.cx().tcx().mk_mut_ptr(layout.ty);
    let ptr_layout = bx.cx().layout_of(ptr_ty);
    Self::alloca(bx, ptr_layout)
}
pub fn len<Cx: ConstMethods<'tcx, Value = V>>(
// index into the struct, and this case isn't
// important enough for it.
debug!("codegen_rvalue: creating ugly alloca");
let scratch = PlaceRef::alloca(&mut bx, operand.layout);
scratch.storage_live(&mut bx);
operand.val.store(&mut bx, scratch);
base::coerce_unsized_into(&mut bx, scratch, dest);
rhs: Self::Value,
) -> (Self::Value, Self::Value);
// Alloca builders: the `name` parameter has been dropped; allocas are
// created unnamed and named (when needed) via a separate call.
fn alloca(&mut self, ty: Self::Type, align: Align) -> Self::Value;
fn dynamic_alloca(&mut self, ty: Self::Type, align: Align) -> Self::Value;
fn array_alloca(
    &mut self,
    ty: Self::Type,
    len: Self::Value,
    align: Align,
) -> Self::Value;
let _s = S;
// Check that the personality slot alloca gets a lifetime start in each cleanup block, not just
// in the first one.
// CHECK: [[SLOT:%[0-9]+]] = alloca { i8*, i32 }
// CHECK-LABEL: cleanup:
// CHECK: [[BITCAST:%[0-9]+]] = bitcast { i8*, i32 }* [[SLOT]] to i8*
// CHECK-NEXT: call void @llvm.lifetime.start.{{.*}}({{.*}}, i8* [[BITCAST]])
// CHECK-LABEL: cleanup1:
// CHECK: [[BITCAST1:%[0-9]+]] = bitcast { i8*, i32 }* [[SLOT]] to i8*
// CHECK-NEXT: call void @llvm.lifetime.start.{{.*}}({{.*}}, i8* [[BITCAST1]])
might_unwind();
let _t = S;
might_unwind();