OperandRef { val, layout: place.layout }
}
- fn write_operand_repeatedly(mut self, cg_elem: OperandRef<'tcx, RValue<'gcc>>, count: u64, dest: PlaceRef<'tcx, RValue<'gcc>>) -> Self {
+ fn write_operand_repeatedly(&mut self, cg_elem: OperandRef<'tcx, RValue<'gcc>>, count: u64, dest: PlaceRef<'tcx, RValue<'gcc>>) {
let zero = self.const_usize(0);
let count = self.const_usize(count);
- let start = dest.project_index(&mut self, zero).llval;
- let end = dest.project_index(&mut self, count).llval;
+ let start = dest.project_index(self, zero).llval;
+ let end = dest.project_index(self, count).llval;
let header_bb = self.append_sibling_block("repeat_loop_header");
let body_bb = self.append_sibling_block("repeat_loop_body");
self.switch_to_block(body_bb);
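+ // The loop stores at successive element offsets, so restrict the
+ // destination alignment to one that holds at every element boundary.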
let align = dest.align.restrict_for_offset(dest.layout.field(self.cx(), 0).size);
- cg_elem.val.store(&mut self, PlaceRef::new_sized_aligned(current_val, cg_elem.layout, align));
+ cg_elem.val.store(self, PlaceRef::new_sized_aligned(current_val, cg_elem.layout, align));
let next = self.inbounds_gep(self.backend_type(cg_elem.layout), current.to_rvalue(), &[self.const_usize(1)]);
self.llbb().add_assignment(None, current, next);
self.br(header_bb);
self.switch_to_block(next_bb);
- self
}
fn range_metadata(&mut self, _load: RValue<'gcc>, _range: WrappingRange) {
}
fn write_operand_repeatedly(
- mut self,
+ &mut self,
cg_elem: OperandRef<'tcx, &'ll Value>,
count: u64,
dest: PlaceRef<'tcx, &'ll Value>,
- ) -> Self {
+ ) {
let zero = self.const_usize(0);
let count = self.const_usize(count);
- let start = dest.project_index(&mut self, zero).llval;
- let end = dest.project_index(&mut self, count).llval;
+ let start = dest.project_index(self, zero).llval;
+ let end = dest.project_index(self, count).llval;
let header_bb = self.append_sibling_block("repeat_loop_header");
let body_bb = self.append_sibling_block("repeat_loop_body");
body_bx.br(header_bb);
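+ // `current` is a phi in the loop header: it takes `start` from the entry
+ // block and `next` from the body on each back-edge.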
header_bx.add_incoming_to_phi(current, next, body_bb);
- Self::build(self.cx, next_bb)
+ *self = Self::build(self.cx, next_bb);
}
fn range_metadata(&mut self, load: &'ll Value, range: WrappingRange) {
/// Codegen implementations for some terminator variants.
impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
/// Generates code for a `Resume` terminator.
- fn codegen_resume_terminator(&mut self, helper: TerminatorCodegenHelper<'tcx>, mut bx: Bx) {
+ fn codegen_resume_terminator(&mut self, helper: TerminatorCodegenHelper<'tcx>, bx: &mut Bx) {
if let Some(funclet) = helper.funclet(self) {
bx.cleanup_ret(funclet, None);
} else {
- let slot = self.get_personality_slot(&mut bx);
- let lp0 = slot.project_field(&mut bx, 0);
+ let slot = self.get_personality_slot(bx);
+ let lp0 = slot.project_field(bx, 0);
let lp0 = bx.load_operand(lp0).immediate();
- let lp1 = slot.project_field(&mut bx, 1);
+ let lp1 = slot.project_field(bx, 1);
let lp1 = bx.load_operand(lp1).immediate();
- slot.storage_dead(&mut bx);
+ slot.storage_dead(bx);
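+ // Reassemble the two-word landing-pad value (exception pointer and
+ // selector) before resuming unwinding.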
let mut lp = bx.const_undef(self.landing_pad_type());
lp = bx.insert_value(lp, lp0, 0);
fn codegen_switchint_terminator(
&mut self,
helper: TerminatorCodegenHelper<'tcx>,
- mut bx: Bx,
+ bx: &mut Bx,
discr: &mir::Operand<'tcx>,
switch_ty: Ty<'tcx>,
targets: &SwitchTargets,
) {
- let discr = self.codegen_operand(&mut bx, &discr);
+ let discr = self.codegen_operand(bx, &discr);
// `switch_ty` is redundant; sanity-check that.
assert_eq!(discr.layout.ty, switch_ty);
let mut target_iter = targets.iter();
}
}
- fn codegen_return_terminator(&mut self, mut bx: Bx) {
+ fn codegen_return_terminator(&mut self, bx: &mut Bx) {
// Call `va_end` if this is the definition of a C-variadic function.
if self.fn_abi.c_variadic {
// The `VaList` "spoofed" argument is just after all the real arguments.
}
PassMode::Direct(_) | PassMode::Pair(..) => {
- let op = self.codegen_consume(&mut bx, mir::Place::return_place().as_ref());
+ let op = self.codegen_consume(bx, mir::Place::return_place().as_ref());
if let Ref(llval, _, align) = op.val {
bx.load(bx.backend_type(op.layout), llval, align)
} else {
- op.immediate_or_packed_pair(&mut bx)
+ op.immediate_or_packed_pair(bx)
}
}
};
let llslot = match op.val {
Immediate(_) | Pair(..) => {
- let scratch = PlaceRef::alloca(&mut bx, self.fn_abi.ret.layout);
- op.val.store(&mut bx, scratch);
+ let scratch = PlaceRef::alloca(bx, self.fn_abi.ret.layout);
+ op.val.store(bx, scratch);
scratch.llval
}
Ref(llval, _, align) => {
fn codegen_drop_terminator(
&mut self,
helper: TerminatorCodegenHelper<'tcx>,
- mut bx: Bx,
+ bx: &mut Bx,
location: mir::Place<'tcx>,
target: mir::BasicBlock,
unwind: Option<mir::BasicBlock>,
if let ty::InstanceDef::DropGlue(_, None) = drop_fn.def {
// we don't actually need to drop anything.
- helper.funclet_br(self, &mut bx, target);
+ helper.funclet_br(self, bx, target);
return;
}
- let place = self.codegen_place(&mut bx, location.as_ref());
+ let place = self.codegen_place(bx, location.as_ref());
let (args1, args2);
let mut args = if let Some(llextra) = place.llextra {
args2 = [place.llval, llextra];
args = &args[..1];
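+ // `drop_in_place` occupies a fixed slot in every vtable, so it can be
+ // fetched without knowing the concrete type behind the pointer.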
(
meth::VirtualIndex::from_index(ty::COMMON_VTABLE_ENTRIES_DROPINPLACE)
- .get_fn(&mut bx, vtable, ty, &fn_abi),
+ .get_fn(bx, vtable, ty, &fn_abi),
fn_abi,
)
}
debug!("args' = {:?}", args);
(
meth::VirtualIndex::from_index(ty::COMMON_VTABLE_ENTRIES_DROPINPLACE)
- .get_fn(&mut bx, vtable, ty, &fn_abi),
+ .get_fn(bx, vtable, ty, &fn_abi),
fn_abi,
)
}
};
helper.do_call(
self,
- &mut bx,
+ bx,
fn_abi,
drop_fn,
args,
fn codegen_assert_terminator(
&mut self,
helper: TerminatorCodegenHelper<'tcx>,
- mut bx: Bx,
+ bx: &mut Bx,
terminator: &mir::Terminator<'tcx>,
cond: &mir::Operand<'tcx>,
expected: bool,
cleanup: Option<mir::BasicBlock>,
) {
let span = terminator.source_info.span;
- let cond = self.codegen_operand(&mut bx, cond).immediate();
+ let cond = self.codegen_operand(bx, cond).immediate();
let mut const_cond = bx.const_to_opt_u128(cond, false).map(|c| c == 1);
// This case can currently arise only from functions marked
// with #[rustc_inherit_overflow_checks] and inlined from
// another crate, while the current crate doesn't use overflow checks.
// Don't codegen the panic block if success is known.
if const_cond == Some(expected) {
- helper.funclet_br(self, &mut bx, target);
+ helper.funclet_br(self, bx, target);
return;
}
// After this point, bx is the block for the call to panic.
bx.switch_to_block(panic_block);
- self.set_debug_loc(&mut bx, terminator.source_info);
+ self.set_debug_loc(bx, terminator.source_info);
// Get the location information.
- let location = self.get_caller_location(&mut bx, terminator.source_info).immediate();
+ let location = self.get_caller_location(bx, terminator.source_info).immediate();
// Put together the arguments to the panic entry point.
let (lang_item, args) = match msg {
AssertKind::BoundsCheck { ref len, ref index } => {
- let len = self.codegen_operand(&mut bx, len).immediate();
- let index = self.codegen_operand(&mut bx, index).immediate();
+ let len = self.codegen_operand(bx, len).immediate();
+ let index = self.codegen_operand(bx, index).immediate();
// It's `fn panic_bounds_check(index: usize, len: usize)`,
// and `#[track_caller]` adds an implicit third argument.
(LangItem::PanicBoundsCheck, vec![index, len, location])
}
};
- let (fn_abi, llfn) = common::build_langcall(&bx, Some(span), lang_item);
+ let (fn_abi, llfn) = common::build_langcall(bx, Some(span), lang_item);
// Codegen the actual panic invoke/call.
- helper.do_call(self, &mut bx, fn_abi, llfn, &args, None, cleanup, &[]);
+ helper.do_call(self, bx, fn_abi, llfn, &args, None, cleanup, &[]);
}
fn codegen_abort_terminator(
&mut self,
helper: TerminatorCodegenHelper<'tcx>,
- mut bx: Bx,
+ bx: &mut Bx,
terminator: &mir::Terminator<'tcx>,
) {
let span = terminator.source_info.span;
- self.set_debug_loc(&mut bx, terminator.source_info);
+ self.set_debug_loc(bx, terminator.source_info);
// Obtain the panic entry point.
- let (fn_abi, llfn) = common::build_langcall(&bx, Some(span), LangItem::PanicNoUnwind);
+ let (fn_abi, llfn) = common::build_langcall(bx, Some(span), LangItem::PanicNoUnwind);
// Codegen the actual panic invoke/call.
- helper.do_call(self, &mut bx, fn_abi, llfn, &[], None, None, &[]);
+ helper.do_call(self, bx, fn_abi, llfn, &[], None, None, &[]);
}
/// Returns `true` if this is indeed a panic intrinsic and codegen is done.
fn codegen_call_terminator(
&mut self,
helper: TerminatorCodegenHelper<'tcx>,
- mut bx: Bx,
+ bx: &mut Bx,
terminator: &mir::Terminator<'tcx>,
func: &mir::Operand<'tcx>,
args: &[mir::Operand<'tcx>],
let span = source_info.span;
// Create the callee. This is a fn ptr or zero-sized and hence a kind of scalar.
- let callee = self.codegen_operand(&mut bx, func);
+ let callee = self.codegen_operand(bx, func);
let (instance, mut llfn) = match *callee.layout.ty.kind() {
ty::FnDef(def_id, substs) => (
if let Some(ty::InstanceDef::DropGlue(_, None)) = def {
// Empty drop glue; a no-op.
let target = target.unwrap();
- helper.funclet_br(self, &mut bx, target);
+ helper.funclet_br(self, bx, target);
return;
}
if intrinsic == Some(sym::transmute) {
if let Some(target) = target {
- self.codegen_transmute(&mut bx, &args[0], destination);
- helper.funclet_br(self, &mut bx, target);
+ self.codegen_transmute(bx, &args[0], destination);
+ helper.funclet_br(self, bx, target);
} else {
// If we are trying to transmute to an uninhabited type,
// it is likely there is no allotted destination. In fact,
if self.codegen_panic_intrinsic(
&helper,
- &mut bx,
+ bx,
intrinsic,
instance,
source_info,
// Prepare the return value destination
let ret_dest = if target.is_some() {
let is_intrinsic = intrinsic.is_some();
- self.make_return_dest(&mut bx, destination, &fn_abi.ret, &mut llargs, is_intrinsic)
+ self.make_return_dest(bx, destination, &fn_abi.ret, &mut llargs, is_intrinsic)
} else {
ReturnDest::Nothing
};
if intrinsic == Some(sym::caller_location) {
if let Some(target) = target {
- let location = self
- .get_caller_location(&mut bx, mir::SourceInfo { span: fn_span, ..source_info });
+ let location =
+ self.get_caller_location(bx, mir::SourceInfo { span: fn_span, ..source_info });
if let ReturnDest::IndirectOperand(tmp, _) = ret_dest {
- location.val.store(&mut bx, tmp);
+ location.val.store(bx, tmp);
}
- self.store_return(&mut bx, ret_dest, &fn_abi.ret, location.immediate());
- helper.funclet_br(self, &mut bx, target);
+ self.store_return(bx, ret_dest, &fn_abi.ret, location.immediate());
+ helper.funclet_br(self, bx, target);
}
return;
}
}
}
- self.codegen_operand(&mut bx, arg)
+ self.codegen_operand(bx, arg)
})
.collect();
Self::codegen_intrinsic_call(
- &mut bx,
+ bx,
*instance.as_ref().unwrap(),
&fn_abi,
&args,
);
if let ReturnDest::IndirectOperand(dst, _) = ret_dest {
- self.store_return(&mut bx, ret_dest, &fn_abi.ret, dst.llval);
+ self.store_return(bx, ret_dest, &fn_abi.ret, dst.llval);
}
if let Some(target) = target {
- helper.funclet_br(self, &mut bx, target);
+ helper.funclet_br(self, bx, target);
} else {
bx.unreachable();
}
let mut copied_constant_arguments = vec![];
'make_args: for (i, arg) in first_args.iter().enumerate() {
- let mut op = self.codegen_operand(&mut bx, arg);
+ let mut op = self.codegen_operand(bx, arg);
if let (0, Some(ty::InstanceDef::Virtual(_, idx))) = (i, def) {
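+ // For a virtual call, argument 0 is the receiver carrying the vtable;
+ // peel it apart into a data pointer and a vtable before the lookup.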
match op.val {
&& !op.layout.ty.is_region_ptr()
{
for i in 0..op.layout.fields.count() {
- let field = op.extract_field(&mut bx, i);
+ let field = op.extract_field(bx, i);
if !field.layout.is_zst() {
// we found the one non-zero-sized field that is allowed
// now find *its* non-zero-sized field, or stop if it's a
// pointer.
// Now that we have `*dyn Trait` or `&dyn Trait`, split it up into its
// data pointer and vtable. Look up the method in the vtable, and pass
// the data pointer as the first argument
llfn = Some(meth::VirtualIndex::from_index(idx).get_fn(
- &mut bx,
+ bx,
meta,
op.layout.ty,
&fn_abi,
Ref(data_ptr, Some(meta), _) => {
// by-value dynamic dispatch
llfn = Some(meth::VirtualIndex::from_index(idx).get_fn(
- &mut bx,
+ bx,
meta,
op.layout.ty,
&fn_abi,
}
// FIXME(dyn-star): Make sure this is done on a &dyn* receiver
let place = op.deref(bx.cx());
- let data_ptr = place.project_field(&mut bx, 0);
- let meta_ptr = place.project_field(&mut bx, 1);
+ let data_ptr = place.project_field(bx, 0);
+ let meta_ptr = place.project_field(bx, 1);
let meta = bx.load_operand(meta_ptr);
llfn = Some(meth::VirtualIndex::from_index(idx).get_fn(
- &mut bx,
+ bx,
meta.immediate(),
op.layout.ty,
&fn_abi,
match (arg, op.val) {
(&mir::Operand::Copy(_), Ref(_, None, _))
| (&mir::Operand::Constant(_), Ref(_, None, _)) => {
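+ // By-ref arguments must be memory the callee may treat as its own, so
+ // give copies and constants a temporary alloca for the call's duration.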
- let tmp = PlaceRef::alloca(&mut bx, op.layout);
+ let tmp = PlaceRef::alloca(bx, op.layout);
bx.lifetime_start(tmp.llval, tmp.layout.size);
- op.val.store(&mut bx, tmp);
+ op.val.store(bx, tmp);
op.val = Ref(tmp.llval, None, tmp.align);
copied_constant_arguments.push(tmp);
}
_ => {}
}
- self.codegen_argument(&mut bx, op, &mut llargs, &fn_abi.args[i]);
+ self.codegen_argument(bx, op, &mut llargs, &fn_abi.args[i]);
}
let num_untupled = untuple.map(|tup| {
- self.codegen_arguments_untupled(
- &mut bx,
- tup,
- &mut llargs,
- &fn_abi.args[first_args.len()..],
- )
+ self.codegen_arguments_untupled(bx, tup, &mut llargs, &fn_abi.args[first_args.len()..])
});
let needs_location =
fn_abi,
);
let location =
- self.get_caller_location(&mut bx, mir::SourceInfo { span: fn_span, ..source_info });
+ self.get_caller_location(bx, mir::SourceInfo { span: fn_span, ..source_info });
debug!(
"codegen_call_terminator({:?}): location={:?} (fn_span {:?})",
terminator, location, fn_span
);
let last_arg = fn_abi.args.last().unwrap();
- self.codegen_argument(&mut bx, location, &mut llargs, last_arg);
+ self.codegen_argument(bx, location, &mut llargs, last_arg);
}
let (is_indirect_call, fn_ptr) = match (llfn, instance) {
bx.switch_to_block(bb_pass);
helper.do_call(
self,
- &mut bx,
+ bx,
fn_abi,
fn_ptr,
&llargs,
helper.do_call(
self,
- &mut bx,
+ bx,
fn_abi,
fn_ptr,
&llargs,
fn codegen_asm_terminator(
&mut self,
helper: TerminatorCodegenHelper<'tcx>,
- mut bx: Bx,
+ bx: &mut Bx,
terminator: &mir::Terminator<'tcx>,
template: &[ast::InlineAsmTemplatePiece],
operands: &[mir::InlineAsmOperand<'tcx>],
.iter()
.map(|op| match *op {
mir::InlineAsmOperand::In { reg, ref value } => {
- let value = self.codegen_operand(&mut bx, value);
+ let value = self.codegen_operand(bx, value);
InlineAsmOperandRef::In { reg, value }
}
mir::InlineAsmOperand::Out { reg, late, ref place } => {
- let place = place.map(|place| self.codegen_place(&mut bx, place.as_ref()));
+ let place = place.map(|place| self.codegen_place(bx, place.as_ref()));
InlineAsmOperandRef::Out { reg, late, place }
}
mir::InlineAsmOperand::InOut { reg, late, ref in_value, ref out_place } => {
- let in_value = self.codegen_operand(&mut bx, in_value);
+ let in_value = self.codegen_operand(bx, in_value);
let out_place =
- out_place.map(|out_place| self.codegen_place(&mut bx, out_place.as_ref()));
+ out_place.map(|out_place| self.codegen_place(bx, out_place.as_ref()));
InlineAsmOperandRef::InOut { reg, late, in_value, out_place }
}
mir::InlineAsmOperand::Const { ref value } => {
helper.do_inlineasm(
self,
- &mut bx,
+ bx,
template,
&operands,
options,
impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
pub fn codegen_block(&mut self, bb: mir::BasicBlock) {
let llbb = self.llbb(bb);
- let mut bx = Bx::build(self.cx, llbb);
+ let bx = &mut Bx::build(self.cx, llbb);
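+ // A single builder, created once per basic block, is shared by all
+ // statement and terminator codegen via `&mut` borrows.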
let mir = self.mir;
let data = &mir[bb];
debug!("codegen_block({:?}={:?})", bb, data);
for statement in &data.statements {
- bx = self.codegen_statement(bx, statement);
+ self.codegen_statement(bx, statement);
}
self.codegen_terminator(bx, bb, data.terminator());
fn codegen_terminator(
&mut self,
- mut bx: Bx,
+ bx: &mut Bx,
bb: mir::BasicBlock,
terminator: &'tcx mir::Terminator<'tcx>,
) {
let funclet_bb = self.cleanup_kinds[bb].funclet_bb(bb);
let helper = TerminatorCodegenHelper { bb, terminator, funclet_bb };
- self.set_debug_loc(&mut bx, terminator.source_info);
+ self.set_debug_loc(bx, terminator.source_info);
match terminator.kind {
mir::TerminatorKind::Resume => self.codegen_resume_terminator(helper, bx),
}
mir::TerminatorKind::Goto { target } => {
- helper.funclet_br(self, &mut bx, target);
+ helper.funclet_br(self, bx, target);
}
mir::TerminatorKind::SwitchInt { ref discr, switch_ty, ref targets } => {
#[instrument(level = "trace", skip(self, bx))]
pub fn codegen_rvalue(
&mut self,
- mut bx: Bx,
+ bx: &mut Bx,
dest: PlaceRef<'tcx, Bx::Value>,
rvalue: &mir::Rvalue<'tcx>,
- ) -> Bx {
+ ) {
match *rvalue {
mir::Rvalue::Use(ref operand) => {
- let cg_operand = self.codegen_operand(&mut bx, operand);
+ let cg_operand = self.codegen_operand(bx, operand);
// FIXME: consider not copying constants through stack. (Fixable by codegen'ing
// constants into `OperandValue::Ref`; why don’t we do that yet if we don’t?)
- cg_operand.val.store(&mut bx, dest);
- bx
+ cg_operand.val.store(bx, dest);
}
mir::Rvalue::Cast(mir::CastKind::Pointer(PointerCast::Unsize), ref source, _) => {
if bx.cx().is_backend_scalar_pair(dest.layout) {
// Into-coerce of a thin pointer to a fat pointer -- just
// use the operand path.
- let (mut bx, temp) = self.codegen_rvalue_operand(bx, rvalue);
- temp.val.store(&mut bx, dest);
- return bx;
+ let temp = self.codegen_rvalue_operand(bx, rvalue);
+ temp.val.store(bx, dest);
+ return;
}
// Unsize of a nontrivial struct. I would prefer for
// this to be eliminated by MIR building, but
// `CoerceUnsized` can be passed by a where-clause,
// so the (generic) MIR may not be able to expand it.
- let operand = self.codegen_operand(&mut bx, source);
+ let operand = self.codegen_operand(bx, source);
match operand.val {
OperandValue::Pair(..) | OperandValue::Immediate(_) => {
// Unsize from an immediate structure. We don't
// really need a temporary alloca here, but
// avoiding it would require us to have
// `coerce_unsized_into` use `extractvalue` to
// index into the struct, and this case isn't
// important enough for it.
debug!("codegen_rvalue: creating ugly alloca");
- let scratch = PlaceRef::alloca(&mut bx, operand.layout);
- scratch.storage_live(&mut bx);
- operand.val.store(&mut bx, scratch);
- base::coerce_unsized_into(&mut bx, scratch, dest);
- scratch.storage_dead(&mut bx);
+ let scratch = PlaceRef::alloca(bx, operand.layout);
+ scratch.storage_live(bx);
+ operand.val.store(bx, scratch);
+ base::coerce_unsized_into(bx, scratch, dest);
+ scratch.storage_dead(bx);
}
OperandValue::Ref(llref, None, align) => {
let source = PlaceRef::new_sized_aligned(llref, operand.layout, align);
- base::coerce_unsized_into(&mut bx, source, dest);
+ base::coerce_unsized_into(bx, source, dest);
}
OperandValue::Ref(_, Some(_), _) => {
bug!("unsized coercion on an unsized rvalue");
}
}
- bx
}
mir::Rvalue::Repeat(ref elem, count) => {
- let cg_elem = self.codegen_operand(&mut bx, elem);
+ let cg_elem = self.codegen_operand(bx, elem);
// Do not generate the loop for zero-sized elements or empty arrays.
if dest.layout.is_zst() {
- return bx;
+ return;
}
if let OperandValue::Immediate(v) = cg_elem.val {
let zero = bx.const_usize(0);
- let start = dest.project_index(&mut bx, zero).llval;
+ let start = dest.project_index(bx, zero).llval;
let size = bx.const_usize(dest.layout.size.bytes());
// Use llvm.memset.p0i8.* to initialize all-zero arrays
if bx.cx().const_to_opt_u128(v, false) == Some(0) {
let fill = bx.cx().const_u8(0);
bx.memset(start, fill, size, dest.align, MemFlags::empty());
- return bx;
+ return;
}
// Use llvm.memset.p0i8.* to initialize byte arrays
let v = bx.from_immediate(v);
if bx.cx().val_ty(v) == bx.cx().type_i8() {
bx.memset(start, v, size, dest.align, MemFlags::empty());
- return bx;
+ return;
}
}
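+ // No memset shortcut applied: evaluate the monomorphized length and
+ // emit an explicit store loop.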
let count =
self.monomorphize(count).eval_usize(bx.cx().tcx(), ty::ParamEnv::reveal_all());
- bx.write_operand_repeatedly(cg_elem, count, dest)
+ bx.write_operand_repeatedly(cg_elem, count, dest);
}
mir::Rvalue::Aggregate(ref kind, ref operands) => {
let (dest, active_field_index) = match **kind {
mir::AggregateKind::Adt(adt_did, variant_index, _, _, active_field_index) => {
- dest.codegen_set_discr(&mut bx, variant_index);
+ dest.codegen_set_discr(bx, variant_index);
if bx.tcx().adt_def(adt_did).is_enum() {
- (dest.project_downcast(&mut bx, variant_index), active_field_index)
+ (dest.project_downcast(bx, variant_index), active_field_index)
} else {
(dest, active_field_index)
}
_ => (dest, None),
};
for (i, operand) in operands.iter().enumerate() {
- let op = self.codegen_operand(&mut bx, operand);
+ let op = self.codegen_operand(bx, operand);
// Do not generate stores and GEPs for zero-sized fields.
if !op.layout.is_zst() {
let field_index = active_field_index.unwrap_or(i);
let field = if let mir::AggregateKind::Array(_) = **kind {
let llindex = bx.cx().const_usize(field_index as u64);
- dest.project_index(&mut bx, llindex)
+ dest.project_index(bx, llindex)
} else {
- dest.project_field(&mut bx, field_index)
+ dest.project_field(bx, field_index)
};
- op.val.store(&mut bx, field);
+ op.val.store(bx, field);
}
}
- bx
}
_ => {
assert!(self.rvalue_creates_operand(rvalue, DUMMY_SP));
- let (mut bx, temp) = self.codegen_rvalue_operand(bx, rvalue);
- temp.val.store(&mut bx, dest);
- bx
+ let temp = self.codegen_rvalue_operand(bx, rvalue);
+ temp.val.store(bx, dest);
}
}
}
pub fn codegen_rvalue_unsized(
&mut self,
- mut bx: Bx,
+ bx: &mut Bx,
indirect_dest: PlaceRef<'tcx, Bx::Value>,
rvalue: &mir::Rvalue<'tcx>,
- ) -> Bx {
+ ) {
debug!(
"codegen_rvalue_unsized(indirect_dest.llval={:?}, rvalue={:?})",
indirect_dest.llval, rvalue
match *rvalue {
mir::Rvalue::Use(ref operand) => {
- let cg_operand = self.codegen_operand(&mut bx, operand);
- cg_operand.val.store_unsized(&mut bx, indirect_dest);
- bx
+ let cg_operand = self.codegen_operand(bx, operand);
+ cg_operand.val.store_unsized(bx, indirect_dest);
}
_ => bug!("unsized assignment other than `Rvalue::Use`"),
pub fn codegen_rvalue_operand(
&mut self,
- mut bx: Bx,
+ bx: &mut Bx,
rvalue: &mir::Rvalue<'tcx>,
- ) -> (Bx, OperandRef<'tcx, Bx::Value>) {
+ ) -> OperandRef<'tcx, Bx::Value> {
assert!(
self.rvalue_creates_operand(rvalue, DUMMY_SP),
"cannot codegen {:?} to operand",
match *rvalue {
mir::Rvalue::Cast(ref kind, ref source, mir_cast_ty) => {
- let operand = self.codegen_operand(&mut bx, source);
+ let operand = self.codegen_operand(bx, source);
debug!("cast operand is {:?}", operand);
let cast = bx.cx().layout_of(self.monomorphize(mir_cast_ty));
}
};
let (lldata, llextra) =
- base::unsize_ptr(&mut bx, lldata, operand.layout.ty, cast.ty, llextra);
+ base::unsize_ptr(bx, lldata, operand.layout.ty, cast.ty, llextra);
OperandValue::Pair(lldata, llextra)
}
mir::CastKind::Pointer(PointerCast::MutToConstPointer)
OperandValue::Pair(v, l) => (v, Some(l)),
};
let (lldata, llextra) =
- base::cast_to_dyn_star(&mut bx, lldata, operand.layout, cast.ty, llextra);
+ base::cast_to_dyn_star(bx, lldata, operand.layout, cast.ty, llextra);
OperandValue::Pair(lldata, llextra)
}
mir::CastKind::Pointer(
let ll_t_out = bx.cx().immediate_backend_type(cast);
if operand.layout.abi.is_uninhabited() {
let val = OperandValue::Immediate(bx.cx().const_undef(ll_t_out));
- return (bx, OperandRef { val, layout: cast });
+ return OperandRef { val, layout: cast };
}
let r_t_in =
CastTy::from_ty(operand.layout.ty).expect("bad input type for cast");
OperandValue::Immediate(newval)
}
};
- (bx, OperandRef { val, layout: cast })
+ OperandRef { val, layout: cast }
}
mir::Rvalue::Ref(_, bk, place) => {
self.codegen_place_to_pointer(bx, place, mk_ref)
}
- mir::Rvalue::CopyForDeref(place) => {
- let operand = self.codegen_operand(&mut bx, &Operand::Copy(place));
- (bx, operand)
- }
+ mir::Rvalue::CopyForDeref(place) => self.codegen_operand(bx, &Operand::Copy(place)),
mir::Rvalue::AddressOf(mutability, place) => {
let mk_ptr = move |tcx: TyCtxt<'tcx>, ty: Ty<'tcx>| {
tcx.mk_ptr(ty::TypeAndMut { ty, mutbl: mutability })
}
mir::Rvalue::Len(place) => {
- let size = self.evaluate_array_len(&mut bx, place);
- let operand = OperandRef {
+ let size = self.evaluate_array_len(bx, place);
+ OperandRef {
val: OperandValue::Immediate(size),
layout: bx.cx().layout_of(bx.tcx().types.usize),
- };
- (bx, operand)
+ }
}
mir::Rvalue::BinaryOp(op, box (ref lhs, ref rhs)) => {
- let lhs = self.codegen_operand(&mut bx, lhs);
- let rhs = self.codegen_operand(&mut bx, rhs);
+ let lhs = self.codegen_operand(bx, lhs);
+ let rhs = self.codegen_operand(bx, rhs);
let llresult = match (lhs.val, rhs.val) {
(
OperandValue::Pair(lhs_addr, lhs_extra),
OperandValue::Pair(rhs_addr, rhs_extra),
) => self.codegen_fat_ptr_binop(
- &mut bx,
+ bx,
op,
lhs_addr,
lhs_extra,
),
(OperandValue::Immediate(lhs_val), OperandValue::Immediate(rhs_val)) => {
- self.codegen_scalar_binop(&mut bx, op, lhs_val, rhs_val, lhs.layout.ty)
+ self.codegen_scalar_binop(bx, op, lhs_val, rhs_val, lhs.layout.ty)
}
_ => bug!(),
};
- let operand = OperandRef {
+ OperandRef {
val: OperandValue::Immediate(llresult),
layout: bx.cx().layout_of(op.ty(bx.tcx(), lhs.layout.ty, rhs.layout.ty)),
- };
- (bx, operand)
+ }
}
mir::Rvalue::CheckedBinaryOp(op, box (ref lhs, ref rhs)) => {
- let lhs = self.codegen_operand(&mut bx, lhs);
- let rhs = self.codegen_operand(&mut bx, rhs);
+ let lhs = self.codegen_operand(bx, lhs);
+ let rhs = self.codegen_operand(bx, rhs);
let result = self.codegen_scalar_checked_binop(
- &mut bx,
+ bx,
op,
lhs.immediate(),
rhs.immediate(),
);
let val_ty = op.ty(bx.tcx(), lhs.layout.ty, rhs.layout.ty);
let operand_ty = bx.tcx().intern_tup(&[val_ty, bx.tcx().types.bool]);
- let operand = OperandRef { val: result, layout: bx.cx().layout_of(operand_ty) };
-
- (bx, operand)
+ OperandRef { val: result, layout: bx.cx().layout_of(operand_ty) }
}
mir::Rvalue::UnaryOp(op, ref operand) => {
- let operand = self.codegen_operand(&mut bx, operand);
+ let operand = self.codegen_operand(bx, operand);
let lloperand = operand.immediate();
let is_float = operand.layout.ty.is_floating_point();
let llval = match op {
}
}
};
- (bx, OperandRef { val: OperandValue::Immediate(llval), layout: operand.layout })
+ OperandRef { val: OperandValue::Immediate(llval), layout: operand.layout }
}
mir::Rvalue::Discriminant(ref place) => {
let discr_ty = rvalue.ty(self.mir, bx.tcx());
let discr_ty = self.monomorphize(discr_ty);
- let discr = self
- .codegen_place(&mut bx, place.as_ref())
- .codegen_get_discr(&mut bx, discr_ty);
- (
- bx,
- OperandRef {
- val: OperandValue::Immediate(discr),
- layout: self.cx.layout_of(discr_ty),
- },
- )
+ let discr = self.codegen_place(bx, place.as_ref()).codegen_get_discr(bx, discr_ty);
+ OperandRef {
+ val: OperandValue::Immediate(discr),
+ layout: self.cx.layout_of(discr_ty),
+ }
}
mir::Rvalue::NullaryOp(null_op, ty) => {
};
let val = bx.cx().const_usize(val);
let tcx = self.cx.tcx();
- (
- bx,
- OperandRef {
- val: OperandValue::Immediate(val),
- layout: self.cx.layout_of(tcx.types.usize),
- },
- )
+ OperandRef {
+ val: OperandValue::Immediate(val),
+ layout: self.cx.layout_of(tcx.types.usize),
+ }
}
mir::Rvalue::ThreadLocalRef(def_id) => {
assert!(bx.cx().tcx().is_static(def_id));
let static_ = bx.get_static(def_id);
let layout = bx.layout_of(bx.cx().tcx().static_ptr_ty(def_id));
- let operand = OperandRef::from_immediate_or_packed_pair(&mut bx, static_, layout);
- (bx, operand)
- }
- mir::Rvalue::Use(ref operand) => {
- let operand = self.codegen_operand(&mut bx, operand);
- (bx, operand)
+ OperandRef::from_immediate_or_packed_pair(bx, static_, layout)
}
+ mir::Rvalue::Use(ref operand) => self.codegen_operand(bx, operand),
mir::Rvalue::Repeat(..) | mir::Rvalue::Aggregate(..) => {
// According to `rvalue_creates_operand`, only ZST
// aggregate rvalues are allowed to be operands.
let ty = rvalue.ty(self.mir, self.cx.tcx());
- let operand =
- OperandRef::new_zst(&mut bx, self.cx.layout_of(self.monomorphize(ty)));
- (bx, operand)
+ OperandRef::new_zst(bx, self.cx.layout_of(self.monomorphize(ty)))
}
mir::Rvalue::ShallowInitBox(ref operand, content_ty) => {
- let operand = self.codegen_operand(&mut bx, operand);
+ let operand = self.codegen_operand(bx, operand);
let lloperand = operand.immediate();
let content_ty = self.monomorphize(content_ty);
let llty_ptr = bx.cx().backend_type(box_layout);
let val = bx.pointercast(lloperand, llty_ptr);
- let operand = OperandRef { val: OperandValue::Immediate(val), layout: box_layout };
- (bx, operand)
+ OperandRef { val: OperandValue::Immediate(val), layout: box_layout }
}
}
}
/// Codegen an `Rvalue::AddressOf` or `Rvalue::Ref`
fn codegen_place_to_pointer(
&mut self,
- mut bx: Bx,
+ bx: &mut Bx,
place: mir::Place<'tcx>,
mk_ptr_ty: impl FnOnce(TyCtxt<'tcx>, Ty<'tcx>) -> Ty<'tcx>,
- ) -> (Bx, OperandRef<'tcx, Bx::Value>) {
- let cg_place = self.codegen_place(&mut bx, place.as_ref());
+ ) -> OperandRef<'tcx, Bx::Value> {
+ let cg_place = self.codegen_place(bx, place.as_ref());
let ty = cg_place.layout.ty;
} else {
OperandValue::Pair(cg_place.llval, cg_place.llextra.unwrap())
};
- (bx, OperandRef { val, layout: self.cx.layout_of(mk_ptr_ty(self.cx.tcx(), ty)) })
+ OperandRef { val, layout: self.cx.layout_of(mk_ptr_ty(self.cx.tcx(), ty)) }
}
pub fn codegen_scalar_binop(
impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
#[instrument(level = "debug", skip(self, bx))]
- pub fn codegen_statement(&mut self, mut bx: Bx, statement: &mir::Statement<'tcx>) -> Bx {
- self.set_debug_loc(&mut bx, statement.source_info);
+ pub fn codegen_statement(&mut self, bx: &mut Bx, statement: &mir::Statement<'tcx>) {
+ self.set_debug_loc(bx, statement.source_info);
match statement.kind {
mir::StatementKind::Assign(box (ref place, ref rvalue)) => {
if let Some(index) = place.as_local() {
self.codegen_rvalue_unsized(bx, cg_indirect_dest, rvalue)
}
LocalRef::Operand(None) => {
- let (mut bx, operand) = self.codegen_rvalue_operand(bx, rvalue);
+ let operand = self.codegen_rvalue_operand(bx, rvalue);
self.locals[index] = LocalRef::Operand(Some(operand));
- self.debug_introduce_local(&mut bx, index);
- bx
+ self.debug_introduce_local(bx, index);
}
LocalRef::Operand(Some(op)) => {
if !op.layout.is_zst() {
// If the type is zero-sized, it's already been set here,
// but we still need to make sure we codegen the operand
- self.codegen_rvalue_operand(bx, rvalue).0
+ self.codegen_rvalue_operand(bx, rvalue);
}
}
} else {
- let cg_dest = self.codegen_place(&mut bx, place.as_ref());
- self.codegen_rvalue(bx, cg_dest, rvalue)
+ let cg_dest = self.codegen_place(bx, place.as_ref());
+ self.codegen_rvalue(bx, cg_dest, rvalue);
}
}
mir::StatementKind::SetDiscriminant { box ref place, variant_index } => {
- self.codegen_place(&mut bx, place.as_ref())
- .codegen_set_discr(&mut bx, variant_index);
- bx
+ self.codegen_place(bx, place.as_ref()).codegen_set_discr(bx, variant_index);
}
mir::StatementKind::Deinit(..) => {
// For now, don't codegen this to anything. In the future it may be worth
// experimenting with what kind of information we can emit to LLVM without hurting
// perf here.
- bx
}
mir::StatementKind::StorageLive(local) => {
if let LocalRef::Place(cg_place) = self.locals[local] {
- cg_place.storage_live(&mut bx);
+ cg_place.storage_live(bx);
} else if let LocalRef::UnsizedPlace(cg_indirect_place) = self.locals[local] {
- cg_indirect_place.storage_live(&mut bx);
+ cg_indirect_place.storage_live(bx);
}
- bx
}
mir::StatementKind::StorageDead(local) => {
if let LocalRef::Place(cg_place) = self.locals[local] {
- cg_place.storage_dead(&mut bx);
+ cg_place.storage_dead(bx);
} else if let LocalRef::UnsizedPlace(cg_indirect_place) = self.locals[local] {
- cg_indirect_place.storage_dead(&mut bx);
+ cg_indirect_place.storage_dead(bx);
}
- bx
}
mir::StatementKind::Coverage(box ref coverage) => {
- self.codegen_coverage(&mut bx, coverage.clone(), statement.source_info.scope);
- bx
+ self.codegen_coverage(bx, coverage.clone(), statement.source_info.scope);
}
mir::StatementKind::Intrinsic(box NonDivergingIntrinsic::Assume(ref op)) => {
- let op_val = self.codegen_operand(&mut bx, op);
+ let op_val = self.codegen_operand(bx, op);
bx.assume(op_val.immediate());
- bx
}
mir::StatementKind::Intrinsic(box NonDivergingIntrinsic::CopyNonOverlapping(
mir::CopyNonOverlapping { ref count, ref src, ref dst },
)) => {
- let dst_val = self.codegen_operand(&mut bx, dst);
- let src_val = self.codegen_operand(&mut bx, src);
- let count = self.codegen_operand(&mut bx, count).immediate();
+ let dst_val = self.codegen_operand(bx, dst);
+ let src_val = self.codegen_operand(bx, src);
+ let count = self.codegen_operand(bx, count).immediate();
let pointee_layout = dst_val
.layout
- .pointee_info_at(&bx, rustc_target::abi::Size::ZERO)
+ .pointee_info_at(bx, rustc_target::abi::Size::ZERO)
.expect("Expected pointer");
let bytes = bx.mul(count, bx.const_usize(pointee_layout.size.bytes()));
let dst = dst_val.immediate();
let src = src_val.immediate();
bx.memcpy(dst, align, src, align, bytes, crate::MemFlags::empty());
- bx
}
mir::StatementKind::FakeRead(..)
| mir::StatementKind::Retag { .. }
| mir::StatementKind::AscribeUserType(..)
- | mir::StatementKind::Nop => bx,
+ | mir::StatementKind::Nop => {}
}
}
}
/// Called for Rvalue::Repeat when the elem is neither a ZST nor optimizable using memset.
fn write_operand_repeatedly(
- self,
+ &mut self,
elem: OperandRef<'tcx, Self::Value>,
count: u64,
dest: PlaceRef<'tcx, Self::Value>,
- ) -> Self;
+ );
fn range_metadata(&mut self, load: Self::Value, range: WrappingRange);
fn nonnull_metadata(&mut self, load: Self::Value);