Beginning of moving all backend-agnostic code to rustc_codegen_ssa
[rust.git] src/librustc_codegen_llvm/mir/rvalue.rs
index c3ec347f60876c03dc846bbec04f75f546b8d75d..9870c93a5088141edc9ea4e69131fee09ceda667 100644
@@ -8,37 +8,35 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-use llvm;
 use rustc::ty::{self, Ty};
 use rustc::ty::cast::{CastTy, IntTy};
-use rustc::ty::layout::{self, LayoutOf};
+use rustc::ty::layout::{self, LayoutOf, HasTyCtxt};
 use rustc::mir;
 use rustc::middle::lang_items::ExchangeMallocFnLangItem;
 use rustc_apfloat::{ieee, Float, Status, Round};
 use std::{u128, i128};
 
 use base;
-use builder::Builder;
+use builder::MemFlags;
 use callee;
-use common::{self, val_ty};
-use common::{C_bool, C_u8, C_i32, C_u32, C_u64, C_undef, C_null, C_usize, C_uint, C_uint_big};
-use consts;
+use common;
+use rustc_codegen_ssa::common::{RealPredicate, IntPredicate};
 use monomorphize;
-use type_::Type;
 use type_of::LayoutLlvmExt;
-use value::Value;
+
+use interfaces::*;
 
 use super::{FunctionCx, LocalRef};
 use super::operand::{OperandRef, OperandValue};
 use super::place::PlaceRef;
 
-impl FunctionCx<'a, 'll, 'tcx> {
-    pub fn codegen_rvalue(&mut self,
-                        bx: Builder<'a, 'll, 'tcx>,
-                        dest: PlaceRef<'ll, 'tcx>,
-                        rvalue: &mir::Rvalue<'tcx>)
-                        -> Builder<'a, 'll, 'tcx>
-    {
+impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
+    pub fn codegen_rvalue(
+        &mut self,
+        bx: Bx,
+        dest: PlaceRef<'tcx, Bx::Value>,
+        rvalue: &mir::Rvalue<'tcx>
+    ) -> Bx {
         debug!("codegen_rvalue(dest.llval={:?}, rvalue={:?})",
                dest.llval, rvalue);
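
The signature rewrite above is the heart of this patch: the concrete `Builder<'a, 'll, 'tcx>` and `&'ll Value` disappear behind a type parameter `Bx: BuilderMethods<'a, 'tcx>` and its associated `Bx::Value`, so this file stops naming LLVM types directly. A minimal sketch of that pattern, with hypothetical names (`MiniBuilder`, `DummyBackend`) that are not part of rustc:

```rust
use std::fmt::Debug;

// Stand-in for BuilderMethods: the value type is associated, so codegen
// routines written against this trait stay backend-agnostic.
trait MiniBuilder {
    type Value: Copy + Debug;
    fn const_usize(&self, v: u64) -> Self::Value;
    fn add(&mut self, a: Self::Value, b: Self::Value) -> Self::Value;
}

struct DummyBackend;

impl MiniBuilder for DummyBackend {
    type Value = u64; // a real backend would hold its IR value handle here
    fn const_usize(&self, v: u64) -> u64 { v }
    fn add(&mut self, a: u64, b: u64) -> u64 { a + b }
}

// Generic over the builder, like codegen_rvalue over Bx.
fn emit_inc<Bx: MiniBuilder>(bx: &mut Bx, v: Bx::Value) -> Bx::Value {
    let one = bx.const_usize(1);
    bx.add(v, one)
}

fn main() {
    let mut bx = DummyBackend;
    let v = bx.const_usize(41);
    println!("{:?}", emit_inc(&mut bx, v)); // 42
}
```
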
 
@@ -102,28 +100,27 @@ pub fn codegen_rvalue(&mut self,
                     return bx;
                 }
 
-                let start = dest.project_index(&bx, C_usize(bx.cx, 0)).llval;
+                let start = dest.project_index(&bx, bx.cx().const_usize(0)).llval;
 
                 if let OperandValue::Immediate(v) = cg_elem.val {
-                    let align = C_i32(bx.cx, dest.align.abi() as i32);
-                    let size = C_usize(bx.cx, dest.layout.size.bytes());
+                    let size = bx.cx().const_usize(dest.layout.size.bytes());
 
                    // Use llvm.memset.p0i8.* to initialize all-zero arrays
-                    if common::is_const_integral(v) && common::const_to_uint(v) == 0 {
-                        let fill = C_u8(bx.cx, 0);
-                        base::call_memset(&bx, start, fill, size, align, false);
+                    if bx.cx().is_const_integral(v) && bx.cx().const_to_uint(v) == 0 {
+                        let fill = bx.cx().const_u8(0);
+                        bx.memset(start, fill, size, dest.align, MemFlags::empty());
                         return bx;
                     }
 
                     // Use llvm.memset.p0i8.* to initialize byte arrays
                     let v = base::from_immediate(&bx, v);
-                    if common::val_ty(v) == Type::i8(bx.cx) {
-                        base::call_memset(&bx, start, v, size, align, false);
+                    if bx.cx().val_ty(v) == bx.cx().type_i8() {
+                        bx.memset(start, v, size, dest.align, MemFlags::empty());
                         return bx;
                     }
                 }
 
-                let count = C_usize(bx.cx, count);
+                let count = bx.cx().const_usize(count);
                 let end = dest.project_index(&bx, count).llval;
 
                 let header_bx = bx.build_sibling_block("repeat_loop_header");
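
The two early returns above are fast paths for `Rvalue::Repeat`: an element that is a constant integral zero, or any single-byte immediate, collapses into one `memset` (the new `bx.memset` takes `dest.align` and `MemFlags` directly, replacing the old `call_memset` with its hand-built `i32` align argument). Everything else falls through to the explicit loop built below. A hypothetical byte-level model of that decision:

```rust
// Sketch only: an all-zero element or a one-byte element becomes a single
// fill (memset); anything else needs the explicit copy loop.
fn fill_repeat(dest: &mut [u8], elem: &[u8]) {
    assert!(!elem.is_empty() && dest.len() % elem.len() == 0);
    if elem.iter().all(|&b| b == 0) {
        dest.fill(0); // memset(start, const_u8(0), size)
    } else if elem.len() == 1 {
        dest.fill(elem[0]); // memset(start, v, size)
    } else {
        for chunk in dest.chunks_exact_mut(elem.len()) {
            chunk.copy_from_slice(elem); // the repeat_loop_* store path
        }
    }
}

fn main() {
    let mut buf = [0xAAu8; 8];
    fill_repeat(&mut buf, &[0]);
    assert_eq!(buf, [0; 8]);
    fill_repeat(&mut buf, &[1, 2]);
    assert_eq!(buf, [1, 2, 1, 2, 1, 2, 1, 2]);
}
```
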
@@ -131,15 +128,15 @@ pub fn codegen_rvalue(&mut self,
                 let next_bx = bx.build_sibling_block("repeat_loop_next");
 
                 bx.br(header_bx.llbb());
-                let current = header_bx.phi(common::val_ty(start), &[start], &[bx.llbb()]);
+                let current = header_bx.phi(bx.cx().val_ty(start), &[start], &[bx.llbb()]);
 
-                let keep_going = header_bx.icmp(llvm::IntNE, current, end);
+                let keep_going = header_bx.icmp(IntPredicate::IntNE, current, end);
                 header_bx.cond_br(keep_going, body_bx.llbb(), next_bx.llbb());
 
                 cg_elem.val.store(&body_bx,
                     PlaceRef::new_sized(current, cg_elem.layout, dest.align));
 
-                let next = body_bx.inbounds_gep(current, &[C_usize(bx.cx, 1)]);
+                let next = body_bx.inbounds_gep(current, &[bx.cx().const_usize(1)]);
                 body_bx.br(header_bx.llbb());
                 header_bx.add_incoming_to_phi(current, next, body_bx.llbb());
 
@@ -178,12 +175,12 @@ pub fn codegen_rvalue(&mut self,
         }
     }
 
-    pub fn codegen_rvalue_unsized(&mut self,
-                        bx: Builder<'a, 'll, 'tcx>,
-                        indirect_dest: PlaceRef<'ll, 'tcx>,
-                        rvalue: &mir::Rvalue<'tcx>)
-                        -> Builder<'a, 'll, 'tcx>
-    {
+    pub fn codegen_rvalue_unsized(
+        &mut self,
+        bx: Bx,
+        indirect_dest: PlaceRef<'tcx, Bx::Value>,
+        rvalue: &mir::Rvalue<'tcx>,
+    ) -> Bx {
         debug!("codegen_rvalue_unsized(indirect_dest.llval={:?}, rvalue={:?})",
                indirect_dest.llval, rvalue);
 
@@ -198,29 +195,29 @@ pub fn codegen_rvalue_unsized(&mut self,
         }
     }
 
-    pub fn codegen_rvalue_operand(&mut self,
-                                bx: Builder<'a, 'll, 'tcx>,
-                                rvalue: &mir::Rvalue<'tcx>)
-                                -> (Builder<'a, 'll, 'tcx>, OperandRef<'ll, 'tcx>)
-    {
+    pub fn codegen_rvalue_operand(
+        &mut self,
+        bx: Bx,
+        rvalue: &mir::Rvalue<'tcx>
+    ) -> (Bx, OperandRef<'tcx, Bx::Value>) {
         assert!(self.rvalue_creates_operand(rvalue), "cannot codegen {:?} to operand", rvalue);
 
         match *rvalue {
             mir::Rvalue::Cast(ref kind, ref source, mir_cast_ty) => {
                 let operand = self.codegen_operand(&bx, source);
                 debug!("cast operand is {:?}", operand);
-                let cast = bx.cx.layout_of(self.monomorphize(&mir_cast_ty));
+                let cast = bx.cx().layout_of(self.monomorphize(&mir_cast_ty));
 
                 let val = match *kind {
                     mir::CastKind::ReifyFnPointer => {
                         match operand.layout.ty.sty {
                             ty::FnDef(def_id, substs) => {
-                                if bx.cx.tcx.has_attr(def_id, "rustc_args_required_const") {
+                                if bx.cx().tcx().has_attr(def_id, "rustc_args_required_const") {
                                     bug!("reifying a fn ptr that requires \
                                           const arguments");
                                 }
                                 OperandValue::Immediate(
-                                    callee::resolve_and_get_fn(bx.cx, def_id, substs))
+                                    callee::resolve_and_get_fn(bx.cx(), def_id, substs))
                             }
                             _ => {
                                 bug!("{} cannot be reified to a fn ptr", operand.layout.ty)
@@ -231,8 +228,8 @@ pub fn codegen_rvalue_operand(&mut self,
                         match operand.layout.ty.sty {
                             ty::Closure(def_id, substs) => {
                                 let instance = monomorphize::resolve_closure(
-                                    bx.cx.tcx, def_id, substs, ty::ClosureKind::FnOnce);
-                                OperandValue::Immediate(callee::get_fn(bx.cx, instance))
+                                    bx.cx().tcx(), def_id, substs, ty::ClosureKind::FnOnce);
+                                OperandValue::Immediate(bx.cx().get_fn(instance))
                             }
                             _ => {
                                 bug!("{} cannot be cast to a fn ptr", operand.layout.ty)
@@ -255,7 +252,7 @@ pub fn codegen_rvalue_operand(&mut self,
                                 // HACK(eddyb) have to bitcast pointers
                                 // until LLVM removes pointee types.
                                 let lldata = bx.pointercast(lldata,
-                                    cast.scalar_pair_element_llvm_type(bx.cx, 0, true));
+                                    bx.cx().scalar_pair_element_backend_type(cast, 0, true));
                                 OperandValue::Pair(lldata, llextra)
                             }
                             OperandValue::Immediate(lldata) => {
@@ -274,12 +271,12 @@ pub fn codegen_rvalue_operand(&mut self,
                         if let OperandValue::Pair(data_ptr, meta) = operand.val {
                             if cast.is_llvm_scalar_pair() {
                                 let data_cast = bx.pointercast(data_ptr,
-                                    cast.scalar_pair_element_llvm_type(bx.cx, 0, true));
+                                    bx.cx().scalar_pair_element_backend_type(cast, 0, true));
                                 OperandValue::Pair(data_cast, meta)
                             } else { // cast to thin-ptr
                                // A fat-ptr to thin-ptr cast extracts the data pointer and
                                // pointer-casts it to the desired pointer type.
-                                let llcast_ty = cast.immediate_llvm_type(bx.cx);
+                                let llcast_ty = bx.cx().immediate_backend_type(cast);
                                 let llval = bx.pointercast(data_ptr, llcast_ty);
                                 OperandValue::Immediate(llval)
                             }
@@ -289,24 +286,25 @@ pub fn codegen_rvalue_operand(&mut self,
                     }
                     mir::CastKind::Misc => {
                         assert!(cast.is_llvm_immediate());
-                        let ll_t_out = cast.immediate_llvm_type(bx.cx);
-                        if operand.layout.abi == layout::Abi::Uninhabited {
+                        let ll_t_out = bx.cx().immediate_backend_type(cast);
+                        if operand.layout.abi.is_uninhabited() {
+                            let val = OperandValue::Immediate(bx.cx().const_undef(ll_t_out));
                             return (bx, OperandRef {
-                                val: OperandValue::Immediate(C_undef(ll_t_out)),
+                                val,
                                 layout: cast,
                             });
                         }
                         let r_t_in = CastTy::from_ty(operand.layout.ty)
                             .expect("bad input type for cast");
                         let r_t_out = CastTy::from_ty(cast.ty).expect("bad output type for cast");
-                        let ll_t_in = operand.layout.immediate_llvm_type(bx.cx);
+                        let ll_t_in = bx.cx().immediate_backend_type(operand.layout);
                         match operand.layout.variants {
                             layout::Variants::Single { index } => {
                                 if let Some(def) = operand.layout.ty.ty_adt_def() {
                                     let discr_val = def
-                                        .discriminant_for_variant(bx.cx.tcx, index)
+                                        .discriminant_for_variant(bx.cx().tcx(), index)
                                         .val;
-                                    let discr = C_uint_big(ll_t_out, discr_val);
+                                    let discr = bx.cx().const_uint_big(ll_t_out, discr_val);
                                     return (bx, OperandRef {
                                         val: OperandValue::Immediate(discr),
                                         layout: cast,
@@ -327,7 +325,7 @@ pub fn codegen_rvalue_operand(&mut self,
                                 // then `i1 1` (i.e. E::B) is effectively `i8 -1`.
                                 signed = !scalar.is_bool() && s;
 
-                                let er = scalar.valid_range_exclusive(bx.cx);
+                                let er = scalar.valid_range_exclusive(bx.cx());
                                 if er.end != er.start &&
                                    scalar.valid_range.end() > scalar.valid_range.start() {
                                     // We want `table[e as usize]` to not
@@ -335,9 +333,9 @@ pub fn codegen_rvalue_operand(&mut self,
                                     // convenient place to put the `assume`.
 
                                     base::call_assume(&bx, bx.icmp(
-                                        llvm::IntULE,
+                                        IntPredicate::IntULE,
                                         llval,
-                                        C_uint_big(ll_t_in, *scalar.valid_range.end())
+                                        bx.cx().const_uint_big(ll_t_in, *scalar.valid_range.end())
                                     ));
                                 }
                             }
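
The `assume` above tells LLVM that the value being cast never exceeds the scalar's `valid_range.end()`, which is what allows bounds checks on expressions like `table[e as usize]` to be elided. A source-level sketch of the same idea, assuming `std::hint::assert_unchecked` (stable since Rust 1.81) and a hypothetical enum:

```rust
#[allow(dead_code)]
enum E { A, B, C }

fn lookup(table: &[u32; 3], e: E) -> u32 {
    let i = e as usize;
    // Mirrors the emitted `assume(llval ule valid_range.end)`: a valid E
    // discriminant is always 0..=2, so the bounds check below can be dropped.
    unsafe { std::hint::assert_unchecked(i < table.len()) };
    table[i]
}

fn main() {
    println!("{}", lookup(&[10, 20, 30], E::B)); // 20
}
```
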
@@ -348,8 +346,8 @@ pub fn codegen_rvalue_operand(&mut self,
                                 bx.intcast(llval, ll_t_out, signed)
                             }
                             (CastTy::Float, CastTy::Float) => {
-                                let srcsz = ll_t_in.float_width();
-                                let dstsz = ll_t_out.float_width();
+                                let srcsz = bx.cx().float_width(ll_t_in);
+                                let dstsz = bx.cx().float_width(ll_t_out);
                                 if dstsz > srcsz {
                                     bx.fpext(llval, ll_t_out)
                                 } else if srcsz > dstsz {
@@ -366,7 +364,7 @@ pub fn codegen_rvalue_operand(&mut self,
                             (CastTy::FnPtr, CastTy::Int(_)) =>
                                 bx.ptrtoint(llval, ll_t_out),
                             (CastTy::Int(_), CastTy::Ptr(_)) => {
-                                let usize_llval = bx.intcast(llval, bx.cx.isize_ty, signed);
+                                let usize_llval = bx.intcast(llval, bx.cx().type_isize(), signed);
                                 bx.inttoptr(usize_llval, ll_t_out)
                             }
                             (CastTy::Int(_), CastTy::Float) =>
@@ -393,15 +391,15 @@ pub fn codegen_rvalue_operand(&mut self,
 
                 // Note: places are indirect, so storing the `llval` into the
                 // destination effectively creates a reference.
-                let val = if !bx.cx.type_has_metadata(ty) {
+                let val = if !bx.cx().type_has_metadata(ty) {
                     OperandValue::Immediate(cg_place.llval)
                 } else {
                     OperandValue::Pair(cg_place.llval, cg_place.llextra.unwrap())
                 };
                 (bx, OperandRef {
                     val,
-                    layout: self.cx.layout_of(self.cx.tcx.mk_ref(
-                        self.cx.tcx.types.re_erased,
+                    layout: self.cx.layout_of(self.cx.tcx().mk_ref(
+                        self.cx.tcx().types.re_erased,
                         ty::TypeAndMut { ty, mutbl: bk.to_mutbl_lossy() }
                     )),
                 })
@@ -411,7 +409,7 @@ pub fn codegen_rvalue_operand(&mut self,
                 let size = self.evaluate_array_len(&bx, place);
                 let operand = OperandRef {
                     val: OperandValue::Immediate(size),
-                    layout: bx.cx.layout_of(bx.tcx().types.usize),
+                    layout: bx.cx().layout_of(bx.tcx().types.usize),
                 };
                 (bx, operand)
             }
@@ -437,7 +435,7 @@ pub fn codegen_rvalue_operand(&mut self,
                 };
                 let operand = OperandRef {
                     val: OperandValue::Immediate(llresult),
-                    layout: bx.cx.layout_of(
+                    layout: bx.cx().layout_of(
                         op.ty(bx.tcx(), lhs.layout.ty, rhs.layout.ty)),
                 };
                 (bx, operand)
@@ -452,7 +450,7 @@ pub fn codegen_rvalue_operand(&mut self,
                 let operand_ty = bx.tcx().intern_tup(&[val_ty, bx.tcx().types.bool]);
                 let operand = OperandRef {
                     val: result,
-                    layout: bx.cx.layout_of(operand_ty)
+                    layout: bx.cx().layout_of(operand_ty)
                 };
 
                 (bx, operand)
@@ -487,9 +485,9 @@ pub fn codegen_rvalue_operand(&mut self,
             }
 
             mir::Rvalue::NullaryOp(mir::NullOp::SizeOf, ty) => {
-                assert!(bx.cx.type_is_sized(ty));
-                let val = C_usize(bx.cx, bx.cx.size_of(ty).bytes());
-                let tcx = bx.tcx();
+                assert!(bx.cx().type_is_sized(ty));
+                let val = bx.cx().const_usize(bx.cx().layout_of(ty).size.bytes());
+                let tcx = self.cx.tcx();
                 (bx, OperandRef {
                     val: OperandValue::Immediate(val),
                     layout: self.cx.layout_of(tcx.types.usize),
@@ -498,21 +496,21 @@ pub fn codegen_rvalue_operand(&mut self,
 
             mir::Rvalue::NullaryOp(mir::NullOp::Box, content_ty) => {
                 let content_ty: Ty<'tcx> = self.monomorphize(&content_ty);
-                let (size, align) = bx.cx.size_and_align_of(content_ty);
-                let llsize = C_usize(bx.cx, size.bytes());
-                let llalign = C_usize(bx.cx, align.abi());
-                let box_layout = bx.cx.layout_of(bx.tcx().mk_box(content_ty));
-                let llty_ptr = box_layout.llvm_type(bx.cx);
+                let (size, align) = bx.cx().layout_of(content_ty).size_and_align();
+                let llsize = bx.cx().const_usize(size.bytes());
+                let llalign = bx.cx().const_usize(align.abi());
+                let box_layout = bx.cx().layout_of(bx.tcx().mk_box(content_ty));
+                let llty_ptr = bx.cx().backend_type(box_layout);
 
                 // Allocate space:
                 let def_id = match bx.tcx().lang_items().require(ExchangeMallocFnLangItem) {
                     Ok(id) => id,
                     Err(s) => {
-                        bx.sess().fatal(&format!("allocation of `{}` {}", box_layout.ty, s));
+                        bx.cx().sess().fatal(&format!("allocation of `{}` {}", box_layout.ty, s));
                     }
                 };
                 let instance = ty::Instance::mono(bx.tcx(), def_id);
-                let r = callee::get_fn(bx.cx, instance);
+                let r = bx.cx().get_fn(instance);
                 let val = bx.pointercast(bx.call(r, &[llsize, llalign], None), llty_ptr);
 
                 let operand = OperandRef {
@@ -529,7 +527,7 @@ pub fn codegen_rvalue_operand(&mut self,
             mir::Rvalue::Aggregate(..) => {
                 // According to `rvalue_creates_operand`, only ZST
                 // aggregate rvalues are allowed to be operands.
-                let ty = rvalue.ty(self.mir, self.cx.tcx);
+                let ty = rvalue.ty(self.mir, self.cx.tcx());
                 (bx, OperandRef::new_zst(self.cx,
                     self.cx.layout_of(self.monomorphize(&ty))))
             }
@@ -538,32 +536,32 @@ pub fn codegen_rvalue_operand(&mut self,
 
     fn evaluate_array_len(
         &mut self,
-        bx: &Builder<'a, 'll, 'tcx>,
+        bx: &Bx,
         place: &mir::Place<'tcx>,
-    ) -> &'ll Value {
+    ) -> Bx::Value {
        // ZSTs are passed as operands and require special handling
        // because codegen_place() panics if a Local is an operand.
         if let mir::Place::Local(index) = *place {
             if let LocalRef::Operand(Some(op)) = self.locals[index] {
                 if let ty::Array(_, n) = op.layout.ty.sty {
-                    let n = n.unwrap_usize(bx.cx.tcx);
-                    return common::C_usize(bx.cx, n);
+                    let n = n.unwrap_usize(bx.cx().tcx());
+                    return bx.cx().const_usize(n);
                 }
             }
         }
        // Use the common size calculation for non-zero-sized types.
-        let cg_value = self.codegen_place(&bx, place);
-        return cg_value.len(bx.cx);
+        let cg_value = self.codegen_place(bx, place);
+        return cg_value.len(bx.cx());
     }
 
     pub fn codegen_scalar_binop(
         &mut self,
-        bx: &Builder<'a, 'll, 'tcx>,
+        bx: &Bx,
         op: mir::BinOp,
-        lhs: &'ll Value,
-        rhs: &'ll Value,
+        lhs: Bx::Value,
+        rhs: Bx::Value,
         input_ty: Ty<'tcx>,
-    ) -> &'ll Value {
+    ) -> Bx::Value {
         let is_float = input_ty.is_fp();
         let is_signed = input_ty.is_signed();
         let is_unit = input_ty.is_unit();
@@ -605,7 +603,7 @@ pub fn codegen_scalar_binop(
             mir::BinOp::Shr => common::build_unchecked_rshift(bx, input_ty, lhs, rhs),
             mir::BinOp::Ne | mir::BinOp::Lt | mir::BinOp::Gt |
             mir::BinOp::Eq | mir::BinOp::Le | mir::BinOp::Ge => if is_unit {
-                C_bool(bx.cx, match op {
+                bx.cx().const_bool(match op {
                     mir::BinOp::Ne | mir::BinOp::Lt | mir::BinOp::Gt => false,
                     mir::BinOp::Eq | mir::BinOp::Le | mir::BinOp::Ge => true,
                     _ => unreachable!()
@@ -626,42 +624,42 @@ pub fn codegen_scalar_binop(
 
     pub fn codegen_fat_ptr_binop(
         &mut self,
-        bx: &Builder<'a, 'll, 'tcx>,
+        bx: &Bx,
         op: mir::BinOp,
-        lhs_addr: &'ll Value,
-        lhs_extra: &'ll Value,
-        rhs_addr: &'ll Value,
-        rhs_extra: &'ll Value,
+        lhs_addr: Bx::Value,
+        lhs_extra: Bx::Value,
+        rhs_addr: Bx::Value,
+        rhs_extra: Bx::Value,
         _input_ty: Ty<'tcx>,
-    ) -> &'ll Value {
+    ) -> Bx::Value {
         match op {
             mir::BinOp::Eq => {
                 bx.and(
-                    bx.icmp(llvm::IntEQ, lhs_addr, rhs_addr),
-                    bx.icmp(llvm::IntEQ, lhs_extra, rhs_extra)
+                    bx.icmp(IntPredicate::IntEQ, lhs_addr, rhs_addr),
+                    bx.icmp(IntPredicate::IntEQ, lhs_extra, rhs_extra)
                 )
             }
             mir::BinOp::Ne => {
                 bx.or(
-                    bx.icmp(llvm::IntNE, lhs_addr, rhs_addr),
-                    bx.icmp(llvm::IntNE, lhs_extra, rhs_extra)
+                    bx.icmp(IntPredicate::IntNE, lhs_addr, rhs_addr),
+                    bx.icmp(IntPredicate::IntNE, lhs_extra, rhs_extra)
                 )
             }
             mir::BinOp::Le | mir::BinOp::Lt |
             mir::BinOp::Ge | mir::BinOp::Gt => {
                // a OP b ~ a.0 STRICT(OP) b.0 | (a.0 == b.0 && a.1 OP b.1)
                 let (op, strict_op) = match op {
-                    mir::BinOp::Lt => (llvm::IntULT, llvm::IntULT),
-                    mir::BinOp::Le => (llvm::IntULE, llvm::IntULT),
-                    mir::BinOp::Gt => (llvm::IntUGT, llvm::IntUGT),
-                    mir::BinOp::Ge => (llvm::IntUGE, llvm::IntUGT),
+                    mir::BinOp::Lt => (IntPredicate::IntULT, IntPredicate::IntULT),
+                    mir::BinOp::Le => (IntPredicate::IntULE, IntPredicate::IntULT),
+                    mir::BinOp::Gt => (IntPredicate::IntUGT, IntPredicate::IntUGT),
+                    mir::BinOp::Ge => (IntPredicate::IntUGE, IntPredicate::IntUGT),
                     _ => bug!(),
                 };
 
                 bx.or(
                     bx.icmp(strict_op, lhs_addr, rhs_addr),
                     bx.and(
-                        bx.icmp(llvm::IntEQ, lhs_addr, rhs_addr),
+                        bx.icmp(IntPredicate::IntEQ, lhs_addr, rhs_addr),
                         bx.icmp(op, lhs_extra, rhs_extra)
                     )
                 )
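
The ordered fat-pointer comparisons above are lexicographic over the `(addr, extra)` pair, per the comment: a strict compare on the address, and only on equal addresses does the full operator on the metadata decide. Spelled out in plain Rust for `Le` (a sketch; `Le` maps to `(IntULE, IntULT)`):

```rust
// strict_op (`<`) on the address, op (`<=`) on the extra/metadata word.
fn fat_ptr_le(lhs: (usize, usize), rhs: (usize, usize)) -> bool {
    lhs.0 < rhs.0 || (lhs.0 == rhs.0 && lhs.1 <= rhs.1)
}

fn main() {
    assert!(fat_ptr_le((1, 9), (2, 0)));  // strict address compare wins
    assert!(fat_ptr_le((1, 3), (1, 3)));  // equal addr: metadata decides
    assert!(!fat_ptr_le((1, 4), (1, 3)));
}
```
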
@@ -672,19 +670,21 @@ pub fn codegen_fat_ptr_binop(
         }
     }
 
-    pub fn codegen_scalar_checked_binop(&mut self,
-                                      bx: &Builder<'a, 'll, 'tcx>,
-                                      op: mir::BinOp,
-                                      lhs: &'ll Value,
-                                      rhs: &'ll Value,
-                                      input_ty: Ty<'tcx>) -> OperandValue<'ll> {
+    pub fn codegen_scalar_checked_binop(
+        &mut self,
+        bx: &Bx,
+        op: mir::BinOp,
+        lhs: Bx::Value,
+        rhs: Bx::Value,
+        input_ty: Ty<'tcx>
+    ) -> OperandValue<Bx::Value> {
         // This case can currently arise only from functions marked
         // with #[rustc_inherit_overflow_checks] and inlined from
         // another crate (mostly core::num generic/#[inline] fns),
         // while the current crate doesn't use overflow checks.
-        if !bx.cx.check_overflow {
+        if !bx.cx().check_overflow() {
             let val = self.codegen_scalar_binop(bx, op, lhs, rhs, input_ty);
-            return OperandValue::Pair(val, C_bool(bx.cx, false));
+            return OperandValue::Pair(val, bx.cx().const_bool(false));
         }
 
         let (val, of) = match op {
@@ -703,12 +703,12 @@ pub fn codegen_scalar_checked_binop(&mut self,
                  bx.extract_value(res, 1))
             }
             mir::BinOp::Shl | mir::BinOp::Shr => {
-                let lhs_llty = val_ty(lhs);
-                let rhs_llty = val_ty(rhs);
-                let invert_mask = common::shift_mask_val(&bx, lhs_llty, rhs_llty, true);
+                let lhs_llty = bx.cx().val_ty(lhs);
+                let rhs_llty = bx.cx().val_ty(rhs);
+                let invert_mask = common::shift_mask_val(bx, lhs_llty, rhs_llty, true);
                 let outer_bits = bx.and(rhs, invert_mask);
 
-                let of = bx.icmp(llvm::IntNE, outer_bits, C_null(rhs_llty));
+                let of = bx.icmp(IntPredicate::IntNE, outer_bits, bx.cx().const_null(rhs_llty));
                 let val = self.codegen_scalar_binop(bx, op, lhs, rhs, input_ty);
 
                 (val, of)
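
For `Shl`/`Shr`, the overflow bit above comes from masking: `shift_mask_val(.., true)` yields the inverted in-range mask, so any shift-amount bits outside `0..width` flag an overflow, while the result itself is computed with the masked amount. The same test for `u32` in plain Rust (a sketch):

```rust
fn checked_shl_u32(lhs: u32, rhs: u32) -> (u32, bool) {
    let invert_mask = !(32u32 - 1);     // shift_mask_val(.., invert = true)
    let outer_bits = rhs & invert_mask; // bx.and(rhs, invert_mask)
    let of = outer_bits != 0;           // icmp(IntNE, outer_bits, const_null)
    (lhs.wrapping_shl(rhs), of)         // value uses the masked shift amount
}

fn main() {
    assert_eq!(checked_shl_u32(1, 3), (8, false));
    assert_eq!(checked_shl_u32(1, 33), (2, true)); // 33 masked to 1, overflow flagged
}
```
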
@@ -720,7 +720,9 @@ pub fn codegen_scalar_checked_binop(&mut self,
 
         OperandValue::Pair(val, of)
     }
+}
 
+impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
     pub fn rvalue_creates_operand(&self, rvalue: &mir::Rvalue<'tcx>) -> bool {
         match *rvalue {
             mir::Rvalue::Ref(..) |
@@ -735,7 +737,7 @@ pub fn rvalue_creates_operand(&self, rvalue: &mir::Rvalue<'tcx>) -> bool {
                 true,
             mir::Rvalue::Repeat(..) |
             mir::Rvalue::Aggregate(..) => {
-                let ty = rvalue.ty(self.mir, self.cx.tcx);
+                let ty = rvalue.ty(self.mir, self.cx.tcx());
                 let ty = self.monomorphize(&ty);
                 self.cx.layout_of(ty).is_zst()
             }
@@ -750,7 +752,11 @@ enum OverflowOp {
     Add, Sub, Mul
 }
 
-fn get_overflow_intrinsic(oop: OverflowOp, bx: &Builder<'_, 'll, '_>, ty: Ty) -> &'ll Value {
+fn get_overflow_intrinsic<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
+    oop: OverflowOp,
+    bx: &Bx,
+    ty: Ty
+) -> Bx::Value {
     use syntax::ast::IntTy::*;
     use syntax::ast::UintTy::*;
     use rustc::ty::{Int, Uint};
@@ -812,18 +818,22 @@ fn get_overflow_intrinsic(oop: OverflowOp, bx: &Builder<'_, 'll, '_>, ty: Ty) ->
         },
     };
 
-    bx.cx.get_intrinsic(&name)
+    bx.cx().get_intrinsic(&name)
 }
 
-fn cast_int_to_float(bx: &Builder<'_, 'll, '_>,
-                     signed: bool,
-                     x: &'ll Value,
-                     int_ty: &'ll Type,
-                     float_ty: &'ll Type) -> &'ll Value {
+fn cast_int_to_float<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
+    bx: &Bx,
+    signed: bool,
+    x: Bx::Value,
+    int_ty: Bx::Type,
+    float_ty: Bx::Type
+) -> Bx::Value {
     // Most integer types, even i128, fit into [-f32::MAX, f32::MAX] after rounding.
     // It's only u128 -> f32 that can cause overflows (i.e., should yield infinity).
     // LLVM's uitofp produces undef in those cases, so we manually check for that case.
-    let is_u128_to_f32 = !signed && int_ty.int_width() == 128 && float_ty.float_width() == 32;
+    let is_u128_to_f32 = !signed &&
+        bx.cx().int_width(int_ty) == 128 &&
+        bx.cx().float_width(float_ty) == 32;
     if is_u128_to_f32 {
        // All inputs greater than or equal to (f32::MAX + 0.5 ULP) are rounded to infinity,
         // and for everything else LLVM's uitofp works just fine.
@@ -831,10 +841,10 @@ fn cast_int_to_float(bx: &Builder<'_, 'll, '_>,
         use rustc_apfloat::Float;
         const MAX_F32_PLUS_HALF_ULP: u128 = ((1 << (Single::PRECISION + 1)) - 1)
                                             << (Single::MAX_EXP - Single::PRECISION as i16);
-        let max = C_uint_big(int_ty, MAX_F32_PLUS_HALF_ULP);
-        let overflow = bx.icmp(llvm::IntUGE, x, max);
-        let infinity_bits = C_u32(bx.cx, ieee::Single::INFINITY.to_bits() as u32);
-        let infinity = consts::bitcast(infinity_bits, float_ty);
+        let max = bx.cx().const_uint_big(int_ty, MAX_F32_PLUS_HALF_ULP);
+        let overflow = bx.icmp(IntPredicate::IntUGE, x, max);
+        let infinity_bits = bx.cx().const_u32(ieee::Single::INFINITY.to_bits() as u32);
+        let infinity = bx.bitcast(infinity_bits, float_ty);
         bx.select(overflow, infinity, bx.uitofp(x, float_ty))
     } else {
         if signed {
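
The `u128 -> f32` special case above exists because any input at or beyond `f32::MAX + 0.5 ULP` must round to `+inf`, whereas LLVM's `uitofp` produced `undef` there; the `select` patches exactly that hole. The constant works out to `(2^25 - 1) * 2^103`. At source level, as a sketch:

```rust
fn u128_to_f32(x: u128) -> f32 {
    // f32::MAX + 0.5 ULP = (2^25 - 1) * 2^103, matching MAX_F32_PLUS_HALF_ULP.
    const MAX_F32_PLUS_HALF_ULP: u128 = ((1u128 << 25) - 1) << 103;
    if x >= MAX_F32_PLUS_HALF_ULP {
        f32::INFINITY // select(overflow, infinity, uitofp(x))
    } else {
        x as f32 // in range: plain uitofp rounds correctly
    }
}

fn main() {
    assert_eq!(u128_to_f32(u128::MAX), f32::INFINITY);
    assert_eq!(u128_to_f32(1 << 20), (1u32 << 20) as f32);
}
```
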
@@ -845,18 +855,20 @@ fn cast_int_to_float(bx: &Builder<'_, 'll, '_>,
     }
 }
 
-fn cast_float_to_int(bx: &Builder<'_, 'll, '_>,
-                     signed: bool,
-                     x: &'ll Value,
-                     float_ty: &'ll Type,
-                     int_ty: &'ll Type) -> &'ll Value {
+fn cast_float_to_int<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
+    bx: &Bx,
+    signed: bool,
+    x: Bx::Value,
+    float_ty: Bx::Type,
+    int_ty: Bx::Type
+) -> Bx::Value {
     let fptosui_result = if signed {
         bx.fptosi(x, int_ty)
     } else {
         bx.fptoui(x, int_ty)
     };
 
-    if !bx.sess().opts.debugging_opts.saturating_float_casts {
+    if !bx.cx().sess().opts.debugging_opts.saturating_float_casts {
         return fptosui_result;
     }
     // LLVM's fpto[su]i returns undef when the input x is infinite, NaN, or does not fit into the
@@ -878,39 +890,50 @@ fn cast_float_to_int(bx: &Builder<'_, 'll, '_>,
     // On the other hand, f_max works even if int_ty::MAX is greater than float_ty::MAX. Because
     // we're rounding towards zero, we just get float_ty::MAX (which is always an integer).
     // This already happens today with u128::MAX = 2^128 - 1 > f32::MAX.
-    fn compute_clamp_bounds<F: Float>(signed: bool, int_ty: &Type) -> (u128, u128) {
-        let rounded_min = F::from_i128_r(int_min(signed, int_ty), Round::TowardZero);
-        assert_eq!(rounded_min.status, Status::OK);
-        let rounded_max = F::from_u128_r(int_max(signed, int_ty), Round::TowardZero);
-        assert!(rounded_max.value.is_finite());
-        (rounded_min.value.to_bits(), rounded_max.value.to_bits())
-    }
-    fn int_max(signed: bool, int_ty: &Type) -> u128 {
-        let shift_amount = 128 - int_ty.int_width();
+    let int_max = |signed: bool, int_ty: Bx::Type| -> u128 {
+        let shift_amount = 128 - bx.cx().int_width(int_ty);
         if signed {
             i128::MAX as u128 >> shift_amount
         } else {
             u128::MAX >> shift_amount
         }
-    }
-    fn int_min(signed: bool, int_ty: &Type) -> i128 {
+    };
+    let int_min = |signed: bool, int_ty: Bx::Type| -> i128 {
         if signed {
-            i128::MIN >> (128 - int_ty.int_width())
+            i128::MIN >> (128 - bx.cx().int_width(int_ty))
         } else {
             0
         }
-    }
+    };
+
+    let compute_clamp_bounds_single =
+    |signed: bool, int_ty: Bx::Type| -> (u128, u128) {
+        let rounded_min = ieee::Single::from_i128_r(int_min(signed, int_ty), Round::TowardZero);
+        assert_eq!(rounded_min.status, Status::OK);
+        let rounded_max = ieee::Single::from_u128_r(int_max(signed, int_ty), Round::TowardZero);
+        assert!(rounded_max.value.is_finite());
+        (rounded_min.value.to_bits(), rounded_max.value.to_bits())
+    };
+    let compute_clamp_bounds_double =
+    |signed: bool, int_ty: Bx::Type| -> (u128, u128) {
+        let rounded_min = ieee::Double::from_i128_r(int_min(signed, int_ty), Round::TowardZero);
+        assert_eq!(rounded_min.status, Status::OK);
+        let rounded_max = ieee::Double::from_u128_r(int_max(signed, int_ty), Round::TowardZero);
+        assert!(rounded_max.value.is_finite());
+        (rounded_min.value.to_bits(), rounded_max.value.to_bits())
+    };
+
     let float_bits_to_llval = |bits| {
-        let bits_llval = match float_ty.float_width() {
-            32 => C_u32(bx.cx, bits as u32),
-            64 => C_u64(bx.cx, bits as u64),
+        let bits_llval = match bx.cx().float_width(float_ty) {
+            32 => bx.cx().const_u32(bits as u32),
+            64 => bx.cx().const_u64(bits as u64),
             n => bug!("unsupported float width {}", n),
         };
-        consts::bitcast(bits_llval, float_ty)
+        bx.bitcast(bits_llval, float_ty)
     };
-    let (f_min, f_max) = match float_ty.float_width() {
-        32 => compute_clamp_bounds::<ieee::Single>(signed, int_ty),
-        64 => compute_clamp_bounds::<ieee::Double>(signed, int_ty),
+    let (f_min, f_max) = match bx.cx().float_width(float_ty) {
+        32 => compute_clamp_bounds_single(signed, int_ty),
+        64 => compute_clamp_bounds_double(signed, int_ty),
         n => bug!("unsupported float width {}", n),
     };
     let f_min = float_bits_to_llval(f_min);
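
`int_max` and `int_min` above derive the target type's extremes by shifting the 128-bit extremes down to the right width, and the `compute_clamp_bounds_*` closures round those toward zero in the float type. (The split into `_single`/`_double` replaces the old generic `compute_clamp_bounds::<F>`, presumably because these are now closures capturing `bx`, and closures cannot be generic.) A quick check of the shift arithmetic for a 32-bit integer type:

```rust
fn main() {
    let shift_amount = 128 - 32; // 128 - int_width(int_ty)
    assert_eq!(i128::MAX as u128 >> shift_amount, i32::MAX as u128); // signed max
    assert_eq!(u128::MAX >> shift_amount, u32::MAX as u128);         // unsigned max
    assert_eq!(i128::MIN >> shift_amount, i32::MIN as i128);         // signed min (arithmetic shift)
}
```
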
@@ -956,10 +979,10 @@ fn int_min(signed: bool, int_ty: &Type) -> i128 {
     // negation, and the negation can be merged into the select. Therefore, it is not necessarily
     // any more expensive than an ordered ("normal") comparison. Whether these optimizations will be
     // performed is ultimately up to the backend, but at least x86 does perform them.
-    let less_or_nan = bx.fcmp(llvm::RealULT, x, f_min);
-    let greater = bx.fcmp(llvm::RealOGT, x, f_max);
-    let int_max = C_uint_big(int_ty, int_max(signed, int_ty));
-    let int_min = C_uint_big(int_ty, int_min(signed, int_ty) as u128);
+    let less_or_nan = bx.fcmp(RealPredicate::RealULT, x, f_min);
+    let greater = bx.fcmp(RealPredicate::RealOGT, x, f_max);
+    let int_max = bx.cx().const_uint_big(int_ty, int_max(signed, int_ty));
+    let int_min = bx.cx().const_uint_big(int_ty, int_min(signed, int_ty) as u128);
     let s0 = bx.select(less_or_nan, int_min, fptosui_result);
     let s1 = bx.select(greater, int_max, s0);
 
@@ -968,7 +991,7 @@ fn int_min(signed: bool, int_ty: &Type) -> i128 {
     // Therefore we only need to execute this step for signed integer types.
     if signed {
         // LLVM has no isNaN predicate, so we use (x == x) instead
-        bx.select(bx.fcmp(llvm::RealOEQ, x, x), s1, C_uint(int_ty, 0))
+        bx.select(bx.fcmp(RealPredicate::RealOEQ, x, x), s1, bx.cx().const_uint(int_ty, 0))
     } else {
         s1
     }
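
Putting `cast_float_to_int` together: values below `f_min` (or NaN, via the unordered `RealULT`) clamp to `int_min`, values above `f_max` clamp to `int_max`, and for signed targets the final `x == x` select maps NaN to zero. One concrete instance, `f32 -> i32`, as ordinary Rust; a sketch of the semantics, not the emitted IR:

```rust
fn saturating_f32_to_i32(x: f32) -> i32 {
    let f_min = i32::MIN as f32;    // -2^31 is exactly representable
    let f_max = 2_147_483_520.0f32; // i32::MAX rounded toward zero: 2^31 - 128
    if x.is_nan() {
        0 // the final select(x == x, s1, 0) for signed targets
    } else if x < f_min {
        i32::MIN // less_or_nan -> int_min
    } else if x > f_max {
        i32::MAX // greater -> int_max
    } else {
        // In range after clamping: fptosi is well-defined here.
        unsafe { x.to_int_unchecked::<i32>() }
    }
}

fn main() {
    assert_eq!(saturating_f32_to_i32(f32::NAN), 0);
    assert_eq!(saturating_f32_to_i32(-1e10), i32::MIN);
    assert_eq!(saturating_f32_to_i32(1e10), i32::MAX);
    assert_eq!(saturating_f32_to_i32(123.9), 123);
}
```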