Use `&mut Bx` more.
author     Nicholas Nethercote <n.nethercote@gmail.com>
           Wed, 9 Nov 2022 00:04:10 +0000 (11:04 +1100)
committer  Nicholas Nethercote <n.nethercote@gmail.com>
           Wed, 16 Nov 2022 04:46:39 +0000 (15:46 +1100)
For the next commit, the `FunctionCx::codegen_*_terminator` methods need to
take a `&mut Bx` instead of consuming a `Bx`. This triggers a cascade of
similar changes across multiple functions. The resulting code is more
concise, replacing many `&mut bx` expressions with plain `bx`.
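
In isolation the change follows this shape — shown here as a minimal,
self-contained Rust sketch; `Builder`, `emit`, and `codegen_*` below are
illustrative stand-ins, not the compiler's actual `Bx` API:

    // Sketch of this commit's pattern (hypothetical names): a helper that
    // used to consume the builder and return it now borrows it mutably.

    struct Builder {
        insertions: usize, // stands in for real codegen state
    }

    impl Builder {
        fn emit(&mut self) {
            self.insertions += 1;
        }
    }

    // Before: `bx` is consumed and returned, so every caller must thread
    // the builder value through each call and rebind it.
    fn codegen_old(mut bx: Builder) -> Builder {
        bx.emit();
        bx
    }

    // After: the helper takes `&mut Builder`; a caller already holding a
    // `&mut Builder` passes `bx` directly, with no `&mut bx` reborrow and
    // no returned value.
    fn codegen_new(bx: &mut Builder) {
        bx.emit();
    }

    fn main() {
        // Old style: ownership handed off and handed back.
        let bx = codegen_old(Builder { insertions: 0 });

        // New style: one mutable borrow reused across calls.
        let bx = &mut Builder { insertions: bx.insertions };
        codegen_new(bx);
        codegen_new(bx);
        assert_eq!(bx.insertions, 3);
    }

Under the borrowed form, a trait method like `write_operand_repeatedly` no
longer needs to return `Self`, which is the signature change made in
`compiler/rustc_codegen_ssa/src/traits/builder.rs` below.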

compiler/rustc_codegen_gcc/src/builder.rs
compiler/rustc_codegen_llvm/src/builder.rs
compiler/rustc_codegen_ssa/src/mir/block.rs
compiler/rustc_codegen_ssa/src/mir/rvalue.rs
compiler/rustc_codegen_ssa/src/mir/statement.rs
compiler/rustc_codegen_ssa/src/traits/builder.rs

diff --git a/compiler/rustc_codegen_gcc/src/builder.rs b/compiler/rustc_codegen_gcc/src/builder.rs
index a314b7cc2152ddf7d1a74ec5a16300e51e212d05..782f6856654a6f974f8043c72e6a4e543a8b9410 100644
@@ -755,11 +755,11 @@ fn scalar_load_metadata<'a, 'gcc, 'tcx>(bx: &mut Builder<'a, 'gcc, 'tcx>, load:
         OperandRef { val, layout: place.layout }
     }
 
-    fn write_operand_repeatedly(mut self, cg_elem: OperandRef<'tcx, RValue<'gcc>>, count: u64, dest: PlaceRef<'tcx, RValue<'gcc>>) -> Self {
+    fn write_operand_repeatedly(&mut self, cg_elem: OperandRef<'tcx, RValue<'gcc>>, count: u64, dest: PlaceRef<'tcx, RValue<'gcc>>) {
         let zero = self.const_usize(0);
         let count = self.const_usize(count);
-        let start = dest.project_index(&mut self, zero).llval;
-        let end = dest.project_index(&mut self, count).llval;
+        let start = dest.project_index(self, zero).llval;
+        let end = dest.project_index(self, count).llval;
 
         let header_bb = self.append_sibling_block("repeat_loop_header");
         let body_bb = self.append_sibling_block("repeat_loop_body");
@@ -778,14 +778,13 @@ fn write_operand_repeatedly(mut self, cg_elem: OperandRef<'tcx, RValue<'gcc>>, c
 
         self.switch_to_block(body_bb);
         let align = dest.align.restrict_for_offset(dest.layout.field(self.cx(), 0).size);
-        cg_elem.val.store(&mut self, PlaceRef::new_sized_aligned(current_val, cg_elem.layout, align));
+        cg_elem.val.store(self, PlaceRef::new_sized_aligned(current_val, cg_elem.layout, align));
 
         let next = self.inbounds_gep(self.backend_type(cg_elem.layout), current.to_rvalue(), &[self.const_usize(1)]);
         self.llbb().add_assignment(None, current, next);
         self.br(header_bb);
 
         self.switch_to_block(next_bb);
-        self
     }
 
     fn range_metadata(&mut self, _load: RValue<'gcc>, _range: WrappingRange) {
diff --git a/compiler/rustc_codegen_llvm/src/builder.rs b/compiler/rustc_codegen_llvm/src/builder.rs
index 9cb36ce7f1890b967d8ca7f4b948b98c0b4f53bd..77dd15ef4d8078e8c37162776318f6068b272687 100644
@@ -556,15 +556,15 @@ fn scalar_load_metadata<'a, 'll, 'tcx>(
     }
 
     fn write_operand_repeatedly(
-        mut self,
+        &mut self,
         cg_elem: OperandRef<'tcx, &'ll Value>,
         count: u64,
         dest: PlaceRef<'tcx, &'ll Value>,
-    ) -> Self {
+    ) {
         let zero = self.const_usize(0);
         let count = self.const_usize(count);
-        let start = dest.project_index(&mut self, zero).llval;
-        let end = dest.project_index(&mut self, count).llval;
+        let start = dest.project_index(self, zero).llval;
+        let end = dest.project_index(self, count).llval;
 
         let header_bb = self.append_sibling_block("repeat_loop_header");
         let body_bb = self.append_sibling_block("repeat_loop_body");
@@ -592,7 +592,7 @@ fn write_operand_repeatedly(
         body_bx.br(header_bb);
         header_bx.add_incoming_to_phi(current, next, body_bb);
 
-        Self::build(self.cx, next_bb)
+        *self = Self::build(self.cx, next_bb);
     }
 
     fn range_metadata(&mut self, load: &'ll Value, range: WrappingRange) {
diff --git a/compiler/rustc_codegen_ssa/src/mir/block.rs b/compiler/rustc_codegen_ssa/src/mir/block.rs
index 0802067cde65d85478612cfa1021312d89cfe3f3..c7334e19838bdb5acd2a95a9c7b237e828091446 100644
@@ -256,16 +256,16 @@ fn do_inlineasm<Bx: BuilderMethods<'a, 'tcx>>(
 /// Codegen implementations for some terminator variants.
 impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
     /// Generates code for a `Resume` terminator.
-    fn codegen_resume_terminator(&mut self, helper: TerminatorCodegenHelper<'tcx>, mut bx: Bx) {
+    fn codegen_resume_terminator(&mut self, helper: TerminatorCodegenHelper<'tcx>, bx: &mut Bx) {
         if let Some(funclet) = helper.funclet(self) {
             bx.cleanup_ret(funclet, None);
         } else {
-            let slot = self.get_personality_slot(&mut bx);
-            let lp0 = slot.project_field(&mut bx, 0);
+            let slot = self.get_personality_slot(bx);
+            let lp0 = slot.project_field(bx, 0);
             let lp0 = bx.load_operand(lp0).immediate();
-            let lp1 = slot.project_field(&mut bx, 1);
+            let lp1 = slot.project_field(bx, 1);
             let lp1 = bx.load_operand(lp1).immediate();
-            slot.storage_dead(&mut bx);
+            slot.storage_dead(bx);
 
             let mut lp = bx.const_undef(self.landing_pad_type());
             lp = bx.insert_value(lp, lp0, 0);
@@ -277,12 +277,12 @@ fn codegen_resume_terminator(&mut self, helper: TerminatorCodegenHelper<'tcx>, m
     fn codegen_switchint_terminator(
         &mut self,
         helper: TerminatorCodegenHelper<'tcx>,
-        mut bx: Bx,
+        bx: &mut Bx,
         discr: &mir::Operand<'tcx>,
         switch_ty: Ty<'tcx>,
         targets: &SwitchTargets,
     ) {
-        let discr = self.codegen_operand(&mut bx, &discr);
+        let discr = self.codegen_operand(bx, &discr);
         // `switch_ty` is redundant, sanity-check that.
         assert_eq!(discr.layout.ty, switch_ty);
         let mut target_iter = targets.iter();
@@ -338,7 +338,7 @@ fn codegen_switchint_terminator(
         }
     }
 
-    fn codegen_return_terminator(&mut self, mut bx: Bx) {
+    fn codegen_return_terminator(&mut self, bx: &mut Bx) {
         // Call `va_end` if this is the definition of a C-variadic function.
         if self.fn_abi.c_variadic {
             // The `VaList` "spoofed" argument is just after all the real arguments.
@@ -368,11 +368,11 @@ fn codegen_return_terminator(&mut self, mut bx: Bx) {
             }
 
             PassMode::Direct(_) | PassMode::Pair(..) => {
-                let op = self.codegen_consume(&mut bx, mir::Place::return_place().as_ref());
+                let op = self.codegen_consume(bx, mir::Place::return_place().as_ref());
                 if let Ref(llval, _, align) = op.val {
                     bx.load(bx.backend_type(op.layout), llval, align)
                 } else {
-                    op.immediate_or_packed_pair(&mut bx)
+                    op.immediate_or_packed_pair(bx)
                 }
             }
 
@@ -388,8 +388,8 @@ fn codegen_return_terminator(&mut self, mut bx: Bx) {
                 };
                 let llslot = match op.val {
                     Immediate(_) | Pair(..) => {
-                        let scratch = PlaceRef::alloca(&mut bx, self.fn_abi.ret.layout);
-                        op.val.store(&mut bx, scratch);
+                        let scratch = PlaceRef::alloca(bx, self.fn_abi.ret.layout);
+                        op.val.store(bx, scratch);
                         scratch.llval
                     }
                     Ref(llval, _, align) => {
@@ -409,7 +409,7 @@ fn codegen_return_terminator(&mut self, mut bx: Bx) {
     fn codegen_drop_terminator(
         &mut self,
         helper: TerminatorCodegenHelper<'tcx>,
-        mut bx: Bx,
+        bx: &mut Bx,
         location: mir::Place<'tcx>,
         target: mir::BasicBlock,
         unwind: Option<mir::BasicBlock>,
@@ -420,11 +420,11 @@ fn codegen_drop_terminator(
 
         if let ty::InstanceDef::DropGlue(_, None) = drop_fn.def {
             // we don't actually need to drop anything.
-            helper.funclet_br(self, &mut bx, target);
+            helper.funclet_br(self, bx, target);
             return;
         }
 
-        let place = self.codegen_place(&mut bx, location.as_ref());
+        let place = self.codegen_place(bx, location.as_ref());
         let (args1, args2);
         let mut args = if let Some(llextra) = place.llextra {
             args2 = [place.llval, llextra];
@@ -462,7 +462,7 @@ fn codegen_drop_terminator(
                 args = &args[..1];
                 (
                     meth::VirtualIndex::from_index(ty::COMMON_VTABLE_ENTRIES_DROPINPLACE)
-                        .get_fn(&mut bx, vtable, ty, &fn_abi),
+                        .get_fn(bx, vtable, ty, &fn_abi),
                     fn_abi,
                 )
             }
@@ -507,7 +507,7 @@ fn codegen_drop_terminator(
                 debug!("args' = {:?}", args);
                 (
                     meth::VirtualIndex::from_index(ty::COMMON_VTABLE_ENTRIES_DROPINPLACE)
-                        .get_fn(&mut bx, vtable, ty, &fn_abi),
+                        .get_fn(bx, vtable, ty, &fn_abi),
                     fn_abi,
                 )
             }
@@ -515,7 +515,7 @@ fn codegen_drop_terminator(
         };
         helper.do_call(
             self,
-            &mut bx,
+            bx,
             fn_abi,
             drop_fn,
             args,
@@ -528,7 +528,7 @@ fn codegen_drop_terminator(
     fn codegen_assert_terminator(
         &mut self,
         helper: TerminatorCodegenHelper<'tcx>,
-        mut bx: Bx,
+        bx: &mut Bx,
         terminator: &mir::Terminator<'tcx>,
         cond: &mir::Operand<'tcx>,
         expected: bool,
@@ -537,7 +537,7 @@ fn codegen_assert_terminator(
         cleanup: Option<mir::BasicBlock>,
     ) {
         let span = terminator.source_info.span;
-        let cond = self.codegen_operand(&mut bx, cond).immediate();
+        let cond = self.codegen_operand(bx, cond).immediate();
         let mut const_cond = bx.const_to_opt_u128(cond, false).map(|c| c == 1);
 
         // This case can currently arise only from functions marked
@@ -555,7 +555,7 @@ fn codegen_assert_terminator(
 
         // Don't codegen the panic block if success is known.
         if const_cond == Some(expected) {
-            helper.funclet_br(self, &mut bx, target);
+            helper.funclet_br(self, bx, target);
             return;
         }
 
@@ -573,16 +573,16 @@ fn codegen_assert_terminator(
 
         // After this point, bx is the block for the call to panic.
         bx.switch_to_block(panic_block);
-        self.set_debug_loc(&mut bx, terminator.source_info);
+        self.set_debug_loc(bx, terminator.source_info);
 
         // Get the location information.
-        let location = self.get_caller_location(&mut bx, terminator.source_info).immediate();
+        let location = self.get_caller_location(bx, terminator.source_info).immediate();
 
         // Put together the arguments to the panic entry point.
         let (lang_item, args) = match msg {
             AssertKind::BoundsCheck { ref len, ref index } => {
-                let len = self.codegen_operand(&mut bx, len).immediate();
-                let index = self.codegen_operand(&mut bx, index).immediate();
+                let len = self.codegen_operand(bx, len).immediate();
+                let index = self.codegen_operand(bx, index).immediate();
                 // It's `fn panic_bounds_check(index: usize, len: usize)`,
                 // and `#[track_caller]` adds an implicit third argument.
                 (LangItem::PanicBoundsCheck, vec![index, len, location])
@@ -595,26 +595,26 @@ fn codegen_assert_terminator(
             }
         };
 
-        let (fn_abi, llfn) = common::build_langcall(&bx, Some(span), lang_item);
+        let (fn_abi, llfn) = common::build_langcall(bx, Some(span), lang_item);
 
         // Codegen the actual panic invoke/call.
-        helper.do_call(self, &mut bx, fn_abi, llfn, &args, None, cleanup, &[]);
+        helper.do_call(self, bx, fn_abi, llfn, &args, None, cleanup, &[]);
     }
 
     fn codegen_abort_terminator(
         &mut self,
         helper: TerminatorCodegenHelper<'tcx>,
-        mut bx: Bx,
+        bx: &mut Bx,
         terminator: &mir::Terminator<'tcx>,
     ) {
         let span = terminator.source_info.span;
-        self.set_debug_loc(&mut bx, terminator.source_info);
+        self.set_debug_loc(bx, terminator.source_info);
 
         // Obtain the panic entry point.
-        let (fn_abi, llfn) = common::build_langcall(&bx, Some(span), LangItem::PanicNoUnwind);
+        let (fn_abi, llfn) = common::build_langcall(bx, Some(span), LangItem::PanicNoUnwind);
 
         // Codegen the actual panic invoke/call.
-        helper.do_call(self, &mut bx, fn_abi, llfn, &[], None, None, &[]);
+        helper.do_call(self, bx, fn_abi, llfn, &[], None, None, &[]);
     }
 
     /// Returns `true` if this is indeed a panic intrinsic and codegen is done.
@@ -701,7 +701,7 @@ enum AssertIntrinsic {
     fn codegen_call_terminator(
         &mut self,
         helper: TerminatorCodegenHelper<'tcx>,
-        mut bx: Bx,
+        bx: &mut Bx,
         terminator: &mir::Terminator<'tcx>,
         func: &mir::Operand<'tcx>,
         args: &[mir::Operand<'tcx>],
@@ -714,7 +714,7 @@ fn codegen_call_terminator(
         let span = source_info.span;
 
         // Create the callee. This is a fn ptr or zero-sized and hence a kind of scalar.
-        let callee = self.codegen_operand(&mut bx, func);
+        let callee = self.codegen_operand(bx, func);
 
         let (instance, mut llfn) = match *callee.layout.ty.kind() {
             ty::FnDef(def_id, substs) => (
@@ -734,7 +734,7 @@ fn codegen_call_terminator(
         if let Some(ty::InstanceDef::DropGlue(_, None)) = def {
             // Empty drop glue; a no-op.
             let target = target.unwrap();
-            helper.funclet_br(self, &mut bx, target);
+            helper.funclet_br(self, bx, target);
             return;
         }
 
@@ -763,8 +763,8 @@ fn codegen_call_terminator(
 
         if intrinsic == Some(sym::transmute) {
             if let Some(target) = target {
-                self.codegen_transmute(&mut bx, &args[0], destination);
-                helper.funclet_br(self, &mut bx, target);
+                self.codegen_transmute(bx, &args[0], destination);
+                helper.funclet_br(self, bx, target);
             } else {
                 // If we are trying to transmute to an uninhabited type,
                 // it is likely there is no allotted destination. In fact,
@@ -780,7 +780,7 @@ fn codegen_call_terminator(
 
         if self.codegen_panic_intrinsic(
             &helper,
-            &mut bx,
+            bx,
             intrinsic,
             instance,
             source_info,
@@ -797,21 +797,21 @@ fn codegen_call_terminator(
         // Prepare the return value destination
         let ret_dest = if target.is_some() {
             let is_intrinsic = intrinsic.is_some();
-            self.make_return_dest(&mut bx, destination, &fn_abi.ret, &mut llargs, is_intrinsic)
+            self.make_return_dest(bx, destination, &fn_abi.ret, &mut llargs, is_intrinsic)
         } else {
             ReturnDest::Nothing
         };
 
         if intrinsic == Some(sym::caller_location) {
             if let Some(target) = target {
-                let location = self
-                    .get_caller_location(&mut bx, mir::SourceInfo { span: fn_span, ..source_info });
+                let location =
+                    self.get_caller_location(bx, mir::SourceInfo { span: fn_span, ..source_info });
 
                 if let ReturnDest::IndirectOperand(tmp, _) = ret_dest {
-                    location.val.store(&mut bx, tmp);
+                    location.val.store(bx, tmp);
                 }
-                self.store_return(&mut bx, ret_dest, &fn_abi.ret, location.immediate());
-                helper.funclet_br(self, &mut bx, target);
+                self.store_return(bx, ret_dest, &fn_abi.ret, location.immediate());
+                helper.funclet_br(self, bx, target);
             }
             return;
         }
@@ -857,12 +857,12 @@ fn codegen_call_terminator(
                             }
                         }
 
-                        self.codegen_operand(&mut bx, arg)
+                        self.codegen_operand(bx, arg)
                     })
                     .collect();
 
                 Self::codegen_intrinsic_call(
-                    &mut bx,
+                    bx,
                     *instance.as_ref().unwrap(),
                     &fn_abi,
                     &args,
@@ -871,11 +871,11 @@ fn codegen_call_terminator(
                 );
 
                 if let ReturnDest::IndirectOperand(dst, _) = ret_dest {
-                    self.store_return(&mut bx, ret_dest, &fn_abi.ret, dst.llval);
+                    self.store_return(bx, ret_dest, &fn_abi.ret, dst.llval);
                 }
 
                 if let Some(target) = target {
-                    helper.funclet_br(self, &mut bx, target);
+                    helper.funclet_br(self, bx, target);
                 } else {
                     bx.unreachable();
                 }
@@ -894,7 +894,7 @@ fn codegen_call_terminator(
 
         let mut copied_constant_arguments = vec![];
         'make_args: for (i, arg) in first_args.iter().enumerate() {
-            let mut op = self.codegen_operand(&mut bx, arg);
+            let mut op = self.codegen_operand(bx, arg);
 
             if let (0, Some(ty::InstanceDef::Virtual(_, idx))) = (i, def) {
                 match op.val {
@@ -909,7 +909,7 @@ fn codegen_call_terminator(
                             && !op.layout.ty.is_region_ptr()
                         {
                             for i in 0..op.layout.fields.count() {
-                                let field = op.extract_field(&mut bx, i);
+                                let field = op.extract_field(bx, i);
                                 if !field.layout.is_zst() {
                                     // we found the one non-zero-sized field that is allowed
                                     // now find *its* non-zero-sized field, or stop if it's a
@@ -926,7 +926,7 @@ fn codegen_call_terminator(
                         // data pointer and vtable. Look up the method in the vtable, and pass
                         // the data pointer as the first argument
                         llfn = Some(meth::VirtualIndex::from_index(idx).get_fn(
-                            &mut bx,
+                            bx,
                             meta,
                             op.layout.ty,
                             &fn_abi,
@@ -937,7 +937,7 @@ fn codegen_call_terminator(
                     Ref(data_ptr, Some(meta), _) => {
                         // by-value dynamic dispatch
                         llfn = Some(meth::VirtualIndex::from_index(idx).get_fn(
-                            &mut bx,
+                            bx,
                             meta,
                             op.layout.ty,
                             &fn_abi,
@@ -954,11 +954,11 @@ fn codegen_call_terminator(
                         }
                         // FIXME(dyn-star): Make sure this is done on a &dyn* receiver
                         let place = op.deref(bx.cx());
-                        let data_ptr = place.project_field(&mut bx, 0);
-                        let meta_ptr = place.project_field(&mut bx, 1);
+                        let data_ptr = place.project_field(bx, 0);
+                        let meta_ptr = place.project_field(bx, 1);
                         let meta = bx.load_operand(meta_ptr);
                         llfn = Some(meth::VirtualIndex::from_index(idx).get_fn(
-                            &mut bx,
+                            bx,
                             meta.immediate(),
                             op.layout.ty,
                             &fn_abi,
@@ -977,24 +977,19 @@ fn codegen_call_terminator(
             match (arg, op.val) {
                 (&mir::Operand::Copy(_), Ref(_, None, _))
                 | (&mir::Operand::Constant(_), Ref(_, None, _)) => {
-                    let tmp = PlaceRef::alloca(&mut bx, op.layout);
+                    let tmp = PlaceRef::alloca(bx, op.layout);
                     bx.lifetime_start(tmp.llval, tmp.layout.size);
-                    op.val.store(&mut bx, tmp);
+                    op.val.store(bx, tmp);
                     op.val = Ref(tmp.llval, None, tmp.align);
                     copied_constant_arguments.push(tmp);
                 }
                 _ => {}
             }
 
-            self.codegen_argument(&mut bx, op, &mut llargs, &fn_abi.args[i]);
+            self.codegen_argument(bx, op, &mut llargs, &fn_abi.args[i]);
         }
         let num_untupled = untuple.map(|tup| {
-            self.codegen_arguments_untupled(
-                &mut bx,
-                tup,
-                &mut llargs,
-                &fn_abi.args[first_args.len()..],
-            )
+            self.codegen_arguments_untupled(bx, tup, &mut llargs, &fn_abi.args[first_args.len()..])
         });
 
         let needs_location =
@@ -1014,14 +1009,14 @@ fn codegen_call_terminator(
                 fn_abi,
             );
             let location =
-                self.get_caller_location(&mut bx, mir::SourceInfo { span: fn_span, ..source_info });
+                self.get_caller_location(bx, mir::SourceInfo { span: fn_span, ..source_info });
             debug!(
                 "codegen_call_terminator({:?}): location={:?} (fn_span {:?})",
                 terminator, location, fn_span
             );
 
             let last_arg = fn_abi.args.last().unwrap();
-            self.codegen_argument(&mut bx, location, &mut llargs, last_arg);
+            self.codegen_argument(bx, location, &mut llargs, last_arg);
         }
 
         let (is_indirect_call, fn_ptr) = match (llfn, instance) {
@@ -1048,7 +1043,7 @@ fn codegen_call_terminator(
             bx.switch_to_block(bb_pass);
             helper.do_call(
                 self,
-                &mut bx,
+                bx,
                 fn_abi,
                 fn_ptr,
                 &llargs,
@@ -1066,7 +1061,7 @@ fn codegen_call_terminator(
 
         helper.do_call(
             self,
-            &mut bx,
+            bx,
             fn_abi,
             fn_ptr,
             &llargs,
@@ -1079,7 +1074,7 @@ fn codegen_call_terminator(
     fn codegen_asm_terminator(
         &mut self,
         helper: TerminatorCodegenHelper<'tcx>,
-        mut bx: Bx,
+        bx: &mut Bx,
         terminator: &mir::Terminator<'tcx>,
         template: &[ast::InlineAsmTemplatePiece],
         operands: &[mir::InlineAsmOperand<'tcx>],
@@ -1095,17 +1090,17 @@ fn codegen_asm_terminator(
             .iter()
             .map(|op| match *op {
                 mir::InlineAsmOperand::In { reg, ref value } => {
-                    let value = self.codegen_operand(&mut bx, value);
+                    let value = self.codegen_operand(bx, value);
                     InlineAsmOperandRef::In { reg, value }
                 }
                 mir::InlineAsmOperand::Out { reg, late, ref place } => {
-                    let place = place.map(|place| self.codegen_place(&mut bx, place.as_ref()));
+                    let place = place.map(|place| self.codegen_place(bx, place.as_ref()));
                     InlineAsmOperandRef::Out { reg, late, place }
                 }
                 mir::InlineAsmOperand::InOut { reg, late, ref in_value, ref out_place } => {
-                    let in_value = self.codegen_operand(&mut bx, in_value);
+                    let in_value = self.codegen_operand(bx, in_value);
                     let out_place =
-                        out_place.map(|out_place| self.codegen_place(&mut bx, out_place.as_ref()));
+                        out_place.map(|out_place| self.codegen_place(bx, out_place.as_ref()));
                     InlineAsmOperandRef::InOut { reg, late, in_value, out_place }
                 }
                 mir::InlineAsmOperand::Const { ref value } => {
@@ -1143,7 +1138,7 @@ fn codegen_asm_terminator(
 
         helper.do_inlineasm(
             self,
-            &mut bx,
+            bx,
             template,
             &operands,
             options,
@@ -1158,14 +1153,14 @@ fn codegen_asm_terminator(
 impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
     pub fn codegen_block(&mut self, bb: mir::BasicBlock) {
         let llbb = self.llbb(bb);
-        let mut bx = Bx::build(self.cx, llbb);
+        let bx = &mut Bx::build(self.cx, llbb);
         let mir = self.mir;
         let data = &mir[bb];
 
         debug!("codegen_block({:?}={:?})", bb, data);
 
         for statement in &data.statements {
-            bx = self.codegen_statement(bx, statement);
+            self.codegen_statement(bx, statement);
         }
 
         self.codegen_terminator(bx, bb, data.terminator());
@@ -1173,7 +1168,7 @@ pub fn codegen_block(&mut self, bb: mir::BasicBlock) {
 
     fn codegen_terminator(
         &mut self,
-        mut bx: Bx,
+        bx: &mut Bx,
         bb: mir::BasicBlock,
         terminator: &'tcx mir::Terminator<'tcx>,
     ) {
@@ -1183,7 +1178,7 @@ fn codegen_terminator(
         let funclet_bb = self.cleanup_kinds[bb].funclet_bb(bb);
         let helper = TerminatorCodegenHelper { bb, terminator, funclet_bb };
 
-        self.set_debug_loc(&mut bx, terminator.source_info);
+        self.set_debug_loc(bx, terminator.source_info);
         match terminator.kind {
             mir::TerminatorKind::Resume => self.codegen_resume_terminator(helper, bx),
 
@@ -1192,7 +1187,7 @@ fn codegen_terminator(
             }
 
             mir::TerminatorKind::Goto { target } => {
-                helper.funclet_br(self, &mut bx, target);
+                helper.funclet_br(self, bx, target);
             }
 
             mir::TerminatorKind::SwitchInt { ref discr, switch_ty, ref targets } => {
diff --git a/compiler/rustc_codegen_ssa/src/mir/rvalue.rs b/compiler/rustc_codegen_ssa/src/mir/rvalue.rs
index 4aab31fbfe7da71ba31369cb0658b2aaf93cc323..9ad96f7a44742f87d227114f102f388b5c110e01 100644
@@ -18,17 +18,16 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
     #[instrument(level = "trace", skip(self, bx))]
     pub fn codegen_rvalue(
         &mut self,
-        mut bx: Bx,
+        bx: &mut Bx,
         dest: PlaceRef<'tcx, Bx::Value>,
         rvalue: &mir::Rvalue<'tcx>,
-    ) -> Bx {
+    ) {
         match *rvalue {
             mir::Rvalue::Use(ref operand) => {
-                let cg_operand = self.codegen_operand(&mut bx, operand);
+                let cg_operand = self.codegen_operand(bx, operand);
                 // FIXME: consider not copying constants through stack. (Fixable by codegen'ing
                 // constants into `OperandValue::Ref`; why don’t we do that yet if we don’t?)
-                cg_operand.val.store(&mut bx, dest);
-                bx
+                cg_operand.val.store(bx, dest);
             }
 
             mir::Rvalue::Cast(mir::CastKind::Pointer(PointerCast::Unsize), ref source, _) => {
@@ -37,16 +36,16 @@ pub fn codegen_rvalue(
                 if bx.cx().is_backend_scalar_pair(dest.layout) {
                     // Into-coerce of a thin pointer to a fat pointer -- just
                     // use the operand path.
-                    let (mut bx, temp) = self.codegen_rvalue_operand(bx, rvalue);
-                    temp.val.store(&mut bx, dest);
-                    return bx;
+                    let temp = self.codegen_rvalue_operand(bx, rvalue);
+                    temp.val.store(bx, dest);
+                    return;
                 }
 
                 // Unsize of a nontrivial struct. I would prefer for
                 // this to be eliminated by MIR building, but
                 // `CoerceUnsized` can be passed by a where-clause,
                 // so the (generic) MIR may not be able to expand it.
-                let operand = self.codegen_operand(&mut bx, source);
+                let operand = self.codegen_operand(bx, source);
                 match operand.val {
                     OperandValue::Pair(..) | OperandValue::Immediate(_) => {
                         // Unsize from an immediate structure. We don't
@@ -56,63 +55,62 @@ pub fn codegen_rvalue(
                         // index into the struct, and this case isn't
                         // important enough for it.
                         debug!("codegen_rvalue: creating ugly alloca");
-                        let scratch = PlaceRef::alloca(&mut bx, operand.layout);
-                        scratch.storage_live(&mut bx);
-                        operand.val.store(&mut bx, scratch);
-                        base::coerce_unsized_into(&mut bx, scratch, dest);
-                        scratch.storage_dead(&mut bx);
+                        let scratch = PlaceRef::alloca(bx, operand.layout);
+                        scratch.storage_live(bx);
+                        operand.val.store(bx, scratch);
+                        base::coerce_unsized_into(bx, scratch, dest);
+                        scratch.storage_dead(bx);
                     }
                     OperandValue::Ref(llref, None, align) => {
                         let source = PlaceRef::new_sized_aligned(llref, operand.layout, align);
-                        base::coerce_unsized_into(&mut bx, source, dest);
+                        base::coerce_unsized_into(bx, source, dest);
                     }
                     OperandValue::Ref(_, Some(_), _) => {
                         bug!("unsized coercion on an unsized rvalue");
                     }
                 }
-                bx
             }
 
             mir::Rvalue::Repeat(ref elem, count) => {
-                let cg_elem = self.codegen_operand(&mut bx, elem);
+                let cg_elem = self.codegen_operand(bx, elem);
 
                 // Do not generate the loop for zero-sized elements or empty arrays.
                 if dest.layout.is_zst() {
-                    return bx;
+                    return;
                 }
 
                 if let OperandValue::Immediate(v) = cg_elem.val {
                     let zero = bx.const_usize(0);
-                    let start = dest.project_index(&mut bx, zero).llval;
+                    let start = dest.project_index(bx, zero).llval;
                     let size = bx.const_usize(dest.layout.size.bytes());
 
                     // Use llvm.memset.p0i8.* to initialize all zero arrays
                     if bx.cx().const_to_opt_u128(v, false) == Some(0) {
                         let fill = bx.cx().const_u8(0);
                         bx.memset(start, fill, size, dest.align, MemFlags::empty());
-                        return bx;
+                        return;
                     }
 
                     // Use llvm.memset.p0i8.* to initialize byte arrays
                     let v = bx.from_immediate(v);
                     if bx.cx().val_ty(v) == bx.cx().type_i8() {
                         bx.memset(start, v, size, dest.align, MemFlags::empty());
-                        return bx;
+                        return;
                     }
                 }
 
                 let count =
                     self.monomorphize(count).eval_usize(bx.cx().tcx(), ty::ParamEnv::reveal_all());
 
-                bx.write_operand_repeatedly(cg_elem, count, dest)
+                bx.write_operand_repeatedly(cg_elem, count, dest);
             }
 
             mir::Rvalue::Aggregate(ref kind, ref operands) => {
                 let (dest, active_field_index) = match **kind {
                     mir::AggregateKind::Adt(adt_did, variant_index, _, _, active_field_index) => {
-                        dest.codegen_set_discr(&mut bx, variant_index);
+                        dest.codegen_set_discr(bx, variant_index);
                         if bx.tcx().adt_def(adt_did).is_enum() {
-                            (dest.project_downcast(&mut bx, variant_index), active_field_index)
+                            (dest.project_downcast(bx, variant_index), active_field_index)
                         } else {
                             (dest, active_field_index)
                         }
@@ -120,37 +118,35 @@ pub fn codegen_rvalue(
                     _ => (dest, None),
                 };
                 for (i, operand) in operands.iter().enumerate() {
-                    let op = self.codegen_operand(&mut bx, operand);
+                    let op = self.codegen_operand(bx, operand);
                     // Do not generate stores and GEPs for zero-sized fields.
                     if !op.layout.is_zst() {
                         let field_index = active_field_index.unwrap_or(i);
                         let field = if let mir::AggregateKind::Array(_) = **kind {
                             let llindex = bx.cx().const_usize(field_index as u64);
-                            dest.project_index(&mut bx, llindex)
+                            dest.project_index(bx, llindex)
                         } else {
-                            dest.project_field(&mut bx, field_index)
+                            dest.project_field(bx, field_index)
                         };
-                        op.val.store(&mut bx, field);
+                        op.val.store(bx, field);
                     }
                 }
-                bx
             }
 
             _ => {
                 assert!(self.rvalue_creates_operand(rvalue, DUMMY_SP));
-                let (mut bx, temp) = self.codegen_rvalue_operand(bx, rvalue);
-                temp.val.store(&mut bx, dest);
-                bx
+                let temp = self.codegen_rvalue_operand(bx, rvalue);
+                temp.val.store(bx, dest);
             }
         }
     }
 
     pub fn codegen_rvalue_unsized(
         &mut self,
-        mut bx: Bx,
+        bx: &mut Bx,
         indirect_dest: PlaceRef<'tcx, Bx::Value>,
         rvalue: &mir::Rvalue<'tcx>,
-    ) -> Bx {
+    ) {
         debug!(
             "codegen_rvalue_unsized(indirect_dest.llval={:?}, rvalue={:?})",
             indirect_dest.llval, rvalue
@@ -158,9 +154,8 @@ pub fn codegen_rvalue_unsized(
 
         match *rvalue {
             mir::Rvalue::Use(ref operand) => {
-                let cg_operand = self.codegen_operand(&mut bx, operand);
-                cg_operand.val.store_unsized(&mut bx, indirect_dest);
-                bx
+                let cg_operand = self.codegen_operand(bx, operand);
+                cg_operand.val.store_unsized(bx, indirect_dest);
             }
 
             _ => bug!("unsized assignment other than `Rvalue::Use`"),
@@ -169,9 +164,9 @@ pub fn codegen_rvalue_unsized(
 
     pub fn codegen_rvalue_operand(
         &mut self,
-        mut bx: Bx,
+        bx: &mut Bx,
         rvalue: &mir::Rvalue<'tcx>,
-    ) -> (Bx, OperandRef<'tcx, Bx::Value>) {
+    ) -> OperandRef<'tcx, Bx::Value> {
         assert!(
             self.rvalue_creates_operand(rvalue, DUMMY_SP),
             "cannot codegen {:?} to operand",
@@ -180,7 +175,7 @@ pub fn codegen_rvalue_operand(
 
         match *rvalue {
             mir::Rvalue::Cast(ref kind, ref source, mir_cast_ty) => {
-                let operand = self.codegen_operand(&mut bx, source);
+                let operand = self.codegen_operand(bx, source);
                 debug!("cast operand is {:?}", operand);
                 let cast = bx.cx().layout_of(self.monomorphize(mir_cast_ty));
 
@@ -245,7 +240,7 @@ pub fn codegen_rvalue_operand(
                             }
                         };
                         let (lldata, llextra) =
-                            base::unsize_ptr(&mut bx, lldata, operand.layout.ty, cast.ty, llextra);
+                            base::unsize_ptr(bx, lldata, operand.layout.ty, cast.ty, llextra);
                         OperandValue::Pair(lldata, llextra)
                     }
                     mir::CastKind::Pointer(PointerCast::MutToConstPointer)
@@ -278,7 +273,7 @@ pub fn codegen_rvalue_operand(
                             OperandValue::Pair(v, l) => (v, Some(l)),
                         };
                         let (lldata, llextra) =
-                            base::cast_to_dyn_star(&mut bx, lldata, operand.layout, cast.ty, llextra);
+                            base::cast_to_dyn_star(bx, lldata, operand.layout, cast.ty, llextra);
                         OperandValue::Pair(lldata, llextra)
                     }
                     mir::CastKind::Pointer(
@@ -299,7 +294,7 @@ pub fn codegen_rvalue_operand(
                         let ll_t_out = bx.cx().immediate_backend_type(cast);
                         if operand.layout.abi.is_uninhabited() {
                             let val = OperandValue::Immediate(bx.cx().const_undef(ll_t_out));
-                            return (bx, OperandRef { val, layout: cast });
+                            return OperandRef { val, layout: cast };
                         }
                         let r_t_in =
                             CastTy::from_ty(operand.layout.ty).expect("bad input type for cast");
@@ -348,7 +343,7 @@ pub fn codegen_rvalue_operand(
                         OperandValue::Immediate(newval)
                     }
                 };
-                (bx, OperandRef { val, layout: cast })
+                OperandRef { val, layout: cast }
             }
 
             mir::Rvalue::Ref(_, bk, place) => {
@@ -361,10 +356,7 @@ pub fn codegen_rvalue_operand(
                 self.codegen_place_to_pointer(bx, place, mk_ref)
             }
 
-            mir::Rvalue::CopyForDeref(place) => {
-                let operand = self.codegen_operand(&mut bx, &Operand::Copy(place));
-                (bx, operand)
-            }
+            mir::Rvalue::CopyForDeref(place) => self.codegen_operand(bx, &Operand::Copy(place)),
             mir::Rvalue::AddressOf(mutability, place) => {
                 let mk_ptr = move |tcx: TyCtxt<'tcx>, ty: Ty<'tcx>| {
                     tcx.mk_ptr(ty::TypeAndMut { ty, mutbl: mutability })
@@ -373,23 +365,22 @@ pub fn codegen_rvalue_operand(
             }
 
             mir::Rvalue::Len(place) => {
-                let size = self.evaluate_array_len(&mut bx, place);
-                let operand = OperandRef {
+                let size = self.evaluate_array_len(bx, place);
+                OperandRef {
                     val: OperandValue::Immediate(size),
                     layout: bx.cx().layout_of(bx.tcx().types.usize),
-                };
-                (bx, operand)
+                }
             }
 
             mir::Rvalue::BinaryOp(op, box (ref lhs, ref rhs)) => {
-                let lhs = self.codegen_operand(&mut bx, lhs);
-                let rhs = self.codegen_operand(&mut bx, rhs);
+                let lhs = self.codegen_operand(bx, lhs);
+                let rhs = self.codegen_operand(bx, rhs);
                 let llresult = match (lhs.val, rhs.val) {
                     (
                         OperandValue::Pair(lhs_addr, lhs_extra),
                         OperandValue::Pair(rhs_addr, rhs_extra),
                     ) => self.codegen_fat_ptr_binop(
-                        &mut bx,
+                        bx,
                         op,
                         lhs_addr,
                         lhs_extra,
@@ -399,22 +390,21 @@ pub fn codegen_rvalue_operand(
                     ),
 
                     (OperandValue::Immediate(lhs_val), OperandValue::Immediate(rhs_val)) => {
-                        self.codegen_scalar_binop(&mut bx, op, lhs_val, rhs_val, lhs.layout.ty)
+                        self.codegen_scalar_binop(bx, op, lhs_val, rhs_val, lhs.layout.ty)
                     }
 
                     _ => bug!(),
                 };
-                let operand = OperandRef {
+                OperandRef {
                     val: OperandValue::Immediate(llresult),
                     layout: bx.cx().layout_of(op.ty(bx.tcx(), lhs.layout.ty, rhs.layout.ty)),
-                };
-                (bx, operand)
+                }
             }
             mir::Rvalue::CheckedBinaryOp(op, box (ref lhs, ref rhs)) => {
-                let lhs = self.codegen_operand(&mut bx, lhs);
-                let rhs = self.codegen_operand(&mut bx, rhs);
+                let lhs = self.codegen_operand(bx, lhs);
+                let rhs = self.codegen_operand(bx, rhs);
                 let result = self.codegen_scalar_checked_binop(
-                    &mut bx,
+                    bx,
                     op,
                     lhs.immediate(),
                     rhs.immediate(),
@@ -422,13 +412,11 @@ pub fn codegen_rvalue_operand(
                 );
                 let val_ty = op.ty(bx.tcx(), lhs.layout.ty, rhs.layout.ty);
                 let operand_ty = bx.tcx().intern_tup(&[val_ty, bx.tcx().types.bool]);
-                let operand = OperandRef { val: result, layout: bx.cx().layout_of(operand_ty) };
-
-                (bx, operand)
+                OperandRef { val: result, layout: bx.cx().layout_of(operand_ty) }
             }
 
             mir::Rvalue::UnaryOp(op, ref operand) => {
-                let operand = self.codegen_operand(&mut bx, operand);
+                let operand = self.codegen_operand(bx, operand);
                 let lloperand = operand.immediate();
                 let is_float = operand.layout.ty.is_floating_point();
                 let llval = match op {
@@ -441,22 +429,17 @@ pub fn codegen_rvalue_operand(
                         }
                     }
                 };
-                (bx, OperandRef { val: OperandValue::Immediate(llval), layout: operand.layout })
+                OperandRef { val: OperandValue::Immediate(llval), layout: operand.layout }
             }
 
             mir::Rvalue::Discriminant(ref place) => {
                 let discr_ty = rvalue.ty(self.mir, bx.tcx());
                 let discr_ty = self.monomorphize(discr_ty);
-                let discr = self
-                    .codegen_place(&mut bx, place.as_ref())
-                    .codegen_get_discr(&mut bx, discr_ty);
-                (
-                    bx,
-                    OperandRef {
-                        val: OperandValue::Immediate(discr),
-                        layout: self.cx.layout_of(discr_ty),
-                    },
-                )
+                let discr = self.codegen_place(bx, place.as_ref()).codegen_get_discr(bx, discr_ty);
+                OperandRef {
+                    val: OperandValue::Immediate(discr),
+                    layout: self.cx.layout_of(discr_ty),
+                }
             }
 
             mir::Rvalue::NullaryOp(null_op, ty) => {
@@ -469,36 +452,27 @@ pub fn codegen_rvalue_operand(
                 };
                 let val = bx.cx().const_usize(val);
                 let tcx = self.cx.tcx();
-                (
-                    bx,
-                    OperandRef {
-                        val: OperandValue::Immediate(val),
-                        layout: self.cx.layout_of(tcx.types.usize),
-                    },
-                )
+                OperandRef {
+                    val: OperandValue::Immediate(val),
+                    layout: self.cx.layout_of(tcx.types.usize),
+                }
             }
 
             mir::Rvalue::ThreadLocalRef(def_id) => {
                 assert!(bx.cx().tcx().is_static(def_id));
                 let static_ = bx.get_static(def_id);
                 let layout = bx.layout_of(bx.cx().tcx().static_ptr_ty(def_id));
-                let operand = OperandRef::from_immediate_or_packed_pair(&mut bx, static_, layout);
-                (bx, operand)
-            }
-            mir::Rvalue::Use(ref operand) => {
-                let operand = self.codegen_operand(&mut bx, operand);
-                (bx, operand)
+                OperandRef::from_immediate_or_packed_pair(bx, static_, layout)
             }
+            mir::Rvalue::Use(ref operand) => self.codegen_operand(bx, operand),
             mir::Rvalue::Repeat(..) | mir::Rvalue::Aggregate(..) => {
                 // According to `rvalue_creates_operand`, only ZST
                 // aggregate rvalues are allowed to be operands.
                 let ty = rvalue.ty(self.mir, self.cx.tcx());
-                let operand =
-                    OperandRef::new_zst(&mut bx, self.cx.layout_of(self.monomorphize(ty)));
-                (bx, operand)
+                OperandRef::new_zst(bx, self.cx.layout_of(self.monomorphize(ty)))
             }
             mir::Rvalue::ShallowInitBox(ref operand, content_ty) => {
-                let operand = self.codegen_operand(&mut bx, operand);
+                let operand = self.codegen_operand(bx, operand);
                 let lloperand = operand.immediate();
 
                 let content_ty = self.monomorphize(content_ty);
@@ -506,8 +480,7 @@ pub fn codegen_rvalue_operand(
                 let llty_ptr = bx.cx().backend_type(box_layout);
 
                 let val = bx.pointercast(lloperand, llty_ptr);
-                let operand = OperandRef { val: OperandValue::Immediate(val), layout: box_layout };
-                (bx, operand)
+                OperandRef { val: OperandValue::Immediate(val), layout: box_layout }
             }
         }
     }
@@ -531,11 +504,11 @@ fn evaluate_array_len(&mut self, bx: &mut Bx, place: mir::Place<'tcx>) -> Bx::Va
     /// Codegen an `Rvalue::AddressOf` or `Rvalue::Ref`
     fn codegen_place_to_pointer(
         &mut self,
-        mut bx: Bx,
+        bx: &mut Bx,
         place: mir::Place<'tcx>,
         mk_ptr_ty: impl FnOnce(TyCtxt<'tcx>, Ty<'tcx>) -> Ty<'tcx>,
-    ) -> (Bx, OperandRef<'tcx, Bx::Value>) {
-        let cg_place = self.codegen_place(&mut bx, place.as_ref());
+    ) -> OperandRef<'tcx, Bx::Value> {
+        let cg_place = self.codegen_place(bx, place.as_ref());
 
         let ty = cg_place.layout.ty;
 
@@ -546,7 +519,7 @@ fn codegen_place_to_pointer(
         } else {
             OperandValue::Pair(cg_place.llval, cg_place.llextra.unwrap())
         };
-        (bx, OperandRef { val, layout: self.cx.layout_of(mk_ptr_ty(self.cx.tcx(), ty)) })
+        OperandRef { val, layout: self.cx.layout_of(mk_ptr_ty(self.cx.tcx(), ty)) }
     }
 
     pub fn codegen_scalar_binop(
diff --git a/compiler/rustc_codegen_ssa/src/mir/statement.rs b/compiler/rustc_codegen_ssa/src/mir/statement.rs
index 1db0fb3a6f1b0381da9eb2a1ca06eb6e3689af75..19452c8cdc805da6068a7b37f8a871368961588b 100644
@@ -8,8 +8,8 @@
 
 impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
     #[instrument(level = "debug", skip(self, bx))]
-    pub fn codegen_statement(&mut self, mut bx: Bx, statement: &mir::Statement<'tcx>) -> Bx {
-        self.set_debug_loc(&mut bx, statement.source_info);
+    pub fn codegen_statement(&mut self, bx: &mut Bx, statement: &mir::Statement<'tcx>) {
+        self.set_debug_loc(bx, statement.source_info);
         match statement.kind {
             mir::StatementKind::Assign(box (ref place, ref rvalue)) => {
                 if let Some(index) = place.as_local() {
@@ -19,10 +19,9 @@ pub fn codegen_statement(&mut self, mut bx: Bx, statement: &mir::Statement<'tcx>
                             self.codegen_rvalue_unsized(bx, cg_indirect_dest, rvalue)
                         }
                         LocalRef::Operand(None) => {
-                            let (mut bx, operand) = self.codegen_rvalue_operand(bx, rvalue);
+                            let operand = self.codegen_rvalue_operand(bx, rvalue);
                             self.locals[index] = LocalRef::Operand(Some(operand));
-                            self.debug_introduce_local(&mut bx, index);
-                            bx
+                            self.debug_introduce_local(bx, index);
                         }
                         LocalRef::Operand(Some(op)) => {
                             if !op.layout.is_zst() {
@@ -35,59 +34,52 @@ pub fn codegen_statement(&mut self, mut bx: Bx, statement: &mir::Statement<'tcx>
 
                             // If the type is zero-sized, it's already been set here,
                             // but we still need to make sure we codegen the operand
-                            self.codegen_rvalue_operand(bx, rvalue).0
+                            self.codegen_rvalue_operand(bx, rvalue);
                         }
                     }
                 } else {
-                    let cg_dest = self.codegen_place(&mut bx, place.as_ref());
-                    self.codegen_rvalue(bx, cg_dest, rvalue)
+                    let cg_dest = self.codegen_place(bx, place.as_ref());
+                    self.codegen_rvalue(bx, cg_dest, rvalue);
                 }
             }
             mir::StatementKind::SetDiscriminant { box ref place, variant_index } => {
-                self.codegen_place(&mut bx, place.as_ref())
-                    .codegen_set_discr(&mut bx, variant_index);
-                bx
+                self.codegen_place(bx, place.as_ref()).codegen_set_discr(bx, variant_index);
             }
             mir::StatementKind::Deinit(..) => {
                 // For now, don't codegen this to anything. In the future it may be worth
                 // experimenting with what kind of information we can emit to LLVM without hurting
                 // perf here
-                bx
             }
             mir::StatementKind::StorageLive(local) => {
                 if let LocalRef::Place(cg_place) = self.locals[local] {
-                    cg_place.storage_live(&mut bx);
+                    cg_place.storage_live(bx);
                 } else if let LocalRef::UnsizedPlace(cg_indirect_place) = self.locals[local] {
-                    cg_indirect_place.storage_live(&mut bx);
+                    cg_indirect_place.storage_live(bx);
                 }
-                bx
             }
             mir::StatementKind::StorageDead(local) => {
                 if let LocalRef::Place(cg_place) = self.locals[local] {
-                    cg_place.storage_dead(&mut bx);
+                    cg_place.storage_dead(bx);
                 } else if let LocalRef::UnsizedPlace(cg_indirect_place) = self.locals[local] {
-                    cg_indirect_place.storage_dead(&mut bx);
+                    cg_indirect_place.storage_dead(bx);
                 }
-                bx
             }
             mir::StatementKind::Coverage(box ref coverage) => {
-                self.codegen_coverage(&mut bx, coverage.clone(), statement.source_info.scope);
-                bx
+                self.codegen_coverage(bx, coverage.clone(), statement.source_info.scope);
             }
             mir::StatementKind::Intrinsic(box NonDivergingIntrinsic::Assume(ref op)) => {
-                let op_val = self.codegen_operand(&mut bx, op);
+                let op_val = self.codegen_operand(bx, op);
                 bx.assume(op_val.immediate());
-                bx
             }
             mir::StatementKind::Intrinsic(box NonDivergingIntrinsic::CopyNonOverlapping(
                 mir::CopyNonOverlapping { ref count, ref src, ref dst },
             )) => {
-                let dst_val = self.codegen_operand(&mut bx, dst);
-                let src_val = self.codegen_operand(&mut bx, src);
-                let count = self.codegen_operand(&mut bx, count).immediate();
+                let dst_val = self.codegen_operand(bx, dst);
+                let src_val = self.codegen_operand(bx, src);
+                let count = self.codegen_operand(bx, count).immediate();
                 let pointee_layout = dst_val
                     .layout
-                    .pointee_info_at(&bx, rustc_target::abi::Size::ZERO)
+                    .pointee_info_at(bx, rustc_target::abi::Size::ZERO)
                     .expect("Expected pointer");
                 let bytes = bx.mul(count, bx.const_usize(pointee_layout.size.bytes()));
 
@@ -95,12 +87,11 @@ pub fn codegen_statement(&mut self, mut bx: Bx, statement: &mir::Statement<'tcx>
                 let dst = dst_val.immediate();
                 let src = src_val.immediate();
                 bx.memcpy(dst, align, src, align, bytes, crate::MemFlags::empty());
-                bx
             }
             mir::StatementKind::FakeRead(..)
             | mir::StatementKind::Retag { .. }
             | mir::StatementKind::AscribeUserType(..)
-            | mir::StatementKind::Nop => bx,
+            | mir::StatementKind::Nop => {}
         }
     }
 }
diff --git a/compiler/rustc_codegen_ssa/src/traits/builder.rs b/compiler/rustc_codegen_ssa/src/traits/builder.rs
index 01408f39fb30606a63b5a8b4a81be9a8d15ca610..bc679a5dc87b122a4fcc7ea8111216f8b871b8f3 100644
@@ -151,11 +151,11 @@ fn load_operand(&mut self, place: PlaceRef<'tcx, Self::Value>)
 
     /// Called for Rvalue::Repeat when the elem is neither a ZST nor optimizable using memset.
     fn write_operand_repeatedly(
-        self,
+        &mut self,
         elem: OperandRef<'tcx, Self::Value>,
         count: u64,
         dest: PlaceRef<'tcx, Self::Value>,
-    ) -> Self;
+    );
 
     fn range_metadata(&mut self, load: Self::Value, range: WrappingRange);
     fn nonnull_metadata(&mut self, load: Self::Value);