git.lizzy.rs Git - rust.git/commitdiff
Prefixed type methods & removed trait impl for write::CodegenContext
author Denis Merigoux <denis.merigoux@gmail.com>
Thu, 6 Sep 2018 20:52:15 +0000 (13:52 -0700)
committer Eduard-Mihai Burtescu <edy.burt@gmail.com>
Fri, 16 Nov 2018 12:11:59 +0000 (14:11 +0200)
20 files changed:
src/librustc_codegen_llvm/abi.rs
src/librustc_codegen_llvm/asm.rs
src/librustc_codegen_llvm/back/write.rs
src/librustc_codegen_llvm/base.rs
src/librustc_codegen_llvm/builder.rs
src/librustc_codegen_llvm/common.rs
src/librustc_codegen_llvm/consts.rs
src/librustc_codegen_llvm/context.rs
src/librustc_codegen_llvm/debuginfo/gdb.rs
src/librustc_codegen_llvm/interfaces/type_.rs
src/librustc_codegen_llvm/intrinsic.rs
src/librustc_codegen_llvm/meth.rs
src/librustc_codegen_llvm/mir/block.rs
src/librustc_codegen_llvm/mir/constant.rs
src/librustc_codegen_llvm/mir/mod.rs
src/librustc_codegen_llvm/mir/operand.rs
src/librustc_codegen_llvm/mir/place.rs
src/librustc_codegen_llvm/mir/rvalue.rs
src/librustc_codegen_llvm/type_.rs
src/librustc_codegen_llvm/type_of.rs
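
The hunks below apply two mechanical changes: every type-construction method on the `TypeMethods` trait and on `CodegenCx` gains a `type_` prefix (`i8()` becomes `type_i8()`, `ptr_to()` becomes `type_ptr_to()`, `func_params()` becomes `func_params_types()`, and so on), and the `Backend`/`CommonWriteMethods` trait impls on `write::CodegenContext` are removed, keeping the remaining helpers as inherent methods. As a minimal sketch of the renamed surface (the `build_fn_ty` helper is hypothetical, not part of the commit, and assumes the `TypeMethods` trait from interfaces/type_.rs is in scope):

    // Sketch only: builds the LLVM type `fn(i8*, i32) -> void` through the
    // renamed `type_*` methods; before this commit the same calls were
    // `cx.ptr_to(cx.i8())`, `cx.i32()`, `cx.void()` and `cx.func(...)`.
    fn build_fn_ty<Cx: TypeMethods>(cx: &Cx) -> Cx::Type {
        let i8p = cx.type_ptr_to(cx.type_i8()); // i8*
        let int32 = cx.type_i32();              // i32
        cx.type_func(&[i8p, int32], cx.type_void())
    }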

index edd1ff59a01e5eb3922adceea9a313835314c912..0d570bc8adabaf78af900b39ad48476bfbdf440b 100644 (file)
@@ -111,16 +111,16 @@ pub trait LlvmType {
 impl LlvmType for Reg {
     fn llvm_type(&self, cx: &CodegenCx<'ll, '_>) -> &'ll Type {
         match self.kind {
-            RegKind::Integer => cx.ix(self.size.bits()),
+            RegKind::Integer => cx.type_ix(self.size.bits()),
             RegKind::Float => {
                 match self.size.bits() {
-                    32 => cx.f32(),
-                    64 => cx.f64(),
+                    32 => cx.type_f32(),
+                    64 => cx.type_f64(),
                     _ => bug!("unsupported float: {:?}", self)
                 }
             }
             RegKind::Vector => {
-                cx.vector(cx.i8(), self.size.bytes())
+                cx.type_vector(cx.type_i8(), self.size.bytes())
             }
         }
     }
@@ -144,7 +144,7 @@ fn llvm_type(&self, cx: &CodegenCx<'ll, '_>) -> &'ll Type {
 
             // Simplify to array when all chunks are the same size and type
             if rem_bytes == 0 {
-                return cx.array(rest_ll_unit, rest_count);
+                return cx.type_array(rest_ll_unit, rest_count);
             }
         }
 
@@ -159,10 +159,10 @@ fn llvm_type(&self, cx: &CodegenCx<'ll, '_>) -> &'ll Type {
         if rem_bytes != 0 {
             // Only integers can be really split further.
             assert_eq!(self.rest.unit.kind, RegKind::Integer);
-            args.push(cx.ix(rem_bytes * 8));
+            args.push(cx.type_ix(rem_bytes * 8));
         }
 
-        cx.struct_(&args, false)
+        cx.type_struct(&args, false)
     }
 }
 
@@ -212,7 +212,7 @@ fn store(
             // uses it for i16 -> {i8, i8}, but not for i24 -> {i8, i8, i8}.
             let can_store_through_cast_ptr = false;
             if can_store_through_cast_ptr {
-                let cast_dst = bx.pointercast(dst.llval, cx.ptr_to(cast.llvm_type(cx)));
+                let cast_dst = bx.pointercast(dst.llval, cx.type_ptr_to(cast.llvm_type(cx)));
                 bx.store(val, cast_dst, self.layout.align);
             } else {
                 // The actual return type is a struct, but the ABI
@@ -240,9 +240,9 @@ fn store(
 
                 // ...and then memcpy it to the intended destination.
                 base::call_memcpy(bx,
-                                  bx.pointercast(dst.llval, cx.i8p()),
+                                  bx.pointercast(dst.llval, cx.type_i8p()),
                                   self.layout.align,
-                                  bx.pointercast(llscratch, cx.i8p()),
+                                  bx.pointercast(llscratch, cx.type_i8p()),
                                   scratch_align,
                                   cx.const_usize(self.layout.size.bytes()),
                                   MemFlags::empty());
@@ -635,14 +635,14 @@ fn llvm_type(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type {
         );
 
         let llreturn_ty = match self.ret.mode {
-            PassMode::Ignore => cx.void(),
+            PassMode::Ignore => cx.type_void(),
             PassMode::Direct(_) | PassMode::Pair(..) => {
                 self.ret.layout.immediate_llvm_type(cx)
             }
             PassMode::Cast(cast) => cast.llvm_type(cx),
             PassMode::Indirect(..) => {
-                llargument_tys.push(cx.ptr_to(self.ret.memory_ty(cx)));
-                cx.void()
+                llargument_tys.push(cx.type_ptr_to(self.ret.memory_ty(cx)));
+                cx.type_void()
             }
         };
 
@@ -668,15 +668,15 @@ fn llvm_type(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type {
                     continue;
                 }
                 PassMode::Cast(cast) => cast.llvm_type(cx),
-                PassMode::Indirect(_, None) => cx.ptr_to(arg.memory_ty(cx)),
+                PassMode::Indirect(_, None) => cx.type_ptr_to(arg.memory_ty(cx)),
             };
             llargument_tys.push(llarg_ty);
         }
 
         if self.variadic {
-            cx.variadic_func(&llargument_tys, llreturn_ty)
+            cx.type_variadic_func(&llargument_tys, llreturn_ty)
         } else {
-            cx.func(&llargument_tys, llreturn_ty)
+            cx.type_func(&llargument_tys, llreturn_ty)
         }
     }
 
index fcf2ea8f8fa23921678461419a9b7550de749f51..0a1cc1e36717217c848dd8c70e50f59c22468756 100644 (file)
@@ -75,9 +75,9 @@ pub fn codegen_inline_asm(
     // Depending on how many outputs we have, the return type is different
     let num_outputs = output_types.len();
     let output_type = match num_outputs {
-        0 => bx.cx().void(),
+        0 => bx.cx().type_void(),
         1 => output_types[0],
-        _ => bx.cx().struct_(&output_types, false)
+        _ => bx.cx().type_struct(&output_types, false)
     };
 
     let asm = CString::new(ia.asm.as_str().as_bytes()).unwrap();
index 9eaafb1c21d3ed0acf1fe18eca78a8b06f3f083a..6977b1285099281b7e28b3a905ef474cfcebe8ff 100644 (file)
@@ -24,7 +24,7 @@
 use rustc::session::Session;
 use rustc::util::nodemap::FxHashMap;
 use time_graph::{self, TimeGraph, Timeline};
-use llvm::{self, DiagnosticInfo, PassManager, SMDiagnostic, BasicBlock};
+use llvm::{self, DiagnosticInfo, PassManager, SMDiagnostic};
 use llvm_util;
 use {CodegenResults, ModuleCodegen, CompiledModule, ModuleKind, // ModuleLlvm,
      CachedModuleCodegen};
@@ -46,7 +46,6 @@
 use syntax_pos::symbol::Symbol;
 use type_::Type;
 use context::{is_pie_binary, get_reloc_model};
-use interfaces::{Backend, CommonWriteMethods};
 use common;
 use jobserver::{Client, Acquired};
 use rustc_demangle;
@@ -429,15 +428,8 @@ pub(crate) fn save_temp_bitcode(&self, module: &ModuleCodegen, name: &str) {
     }
 }
 
-impl<'ll> Backend for CodegenContext<'ll> {
-    type Value = &'ll Value;
-    type BasicBlock = &'ll BasicBlock;
-    type Type = &'ll Type;
-    type Context = &'ll llvm::Context;
-    type TypeKind = llvm::TypeKind;
-}
 
-impl CommonWriteMethods for CodegenContext<'ll> {
+impl CodegenContext<'ll> {
     fn val_ty(&self, v: &'ll Value) -> &'ll Type {
         common::val_ty(v)
     }
@@ -446,18 +438,7 @@ fn const_bytes_in_context(&self, llcx: &'ll llvm::Context, bytes: &[u8]) -> &'ll
         common::const_bytes_in_context(llcx, bytes)
     }
 
-    fn const_struct_in_context(
-        &self,
-        llcx: &'a llvm::Context,
-        elts: &[&'a Value],
-        packed: bool,
-    ) -> &'a Value {
-        common::const_struct_in_context(llcx, elts, packed)
-    }
-}
-
-impl CodegenContext<'ll> {
-    pub fn ptr_to(&self, ty: &'ll Type) -> &'ll Type {
+    pub fn type_ptr_to(&self, ty: &'ll Type) -> &'ll Type {
         unsafe {
             llvm::LLVMPointerType(ty, 0)
         }
index 6d75d64124b8398f7263bb05397f94b1b761ca26..0fab55972bf609be86659b568250eb719bfed863 100644 (file)
@@ -234,13 +234,13 @@ pub fn unsize_thin_ptr(
         (&ty::RawPtr(ty::TypeAndMut { ty: a, .. }),
          &ty::RawPtr(ty::TypeAndMut { ty: b, .. })) => {
             assert!(bx.cx().type_is_sized(a));
-            let ptr_ty = bx.cx().ptr_to(bx.cx().layout_of(b).llvm_type(bx.cx()));
+            let ptr_ty = bx.cx().type_ptr_to(bx.cx().layout_of(b).llvm_type(bx.cx()));
             (bx.pointercast(src, ptr_ty), unsized_info(bx.cx(), a, b, None))
         }
         (&ty::Adt(def_a, _), &ty::Adt(def_b, _)) if def_a.is_box() && def_b.is_box() => {
             let (a, b) = (src_ty.boxed_ty(), dst_ty.boxed_ty());
             assert!(bx.cx().type_is_sized(a));
-            let ptr_ty = bx.cx().ptr_to(bx.cx().layout_of(b).llvm_type(bx.cx()));
+            let ptr_ty = bx.cx().type_ptr_to(bx.cx().layout_of(b).llvm_type(bx.cx()));
             (bx.pointercast(src, ptr_ty), unsized_info(bx.cx(), a, b, None))
         }
         (&ty::Adt(def_a, _), &ty::Adt(def_b, _)) => {
@@ -353,10 +353,10 @@ fn cast_shift_rhs<'ll, F, G>(bx: &Builder<'_, 'll, '_>,
     if op.is_shift() {
         let mut rhs_llty = bx.cx().val_ty(rhs);
         let mut lhs_llty = bx.cx().val_ty(lhs);
-        if bx.cx().kind(rhs_llty) == TypeKind::Vector {
+        if bx.cx().type_kind(rhs_llty) == TypeKind::Vector {
             rhs_llty = bx.cx().element_type(rhs_llty)
         }
-        if bx.cx().kind(lhs_llty) == TypeKind::Vector {
+        if bx.cx().type_kind(lhs_llty) == TypeKind::Vector {
             lhs_llty = bx.cx().element_type(lhs_llty)
         }
         let rhs_sz = bx.cx().int_width(rhs_llty);
@@ -393,8 +393,8 @@ pub fn from_immediate<'a, 'll: 'a, 'tcx: 'll>(
     bx: &Builder<'_ ,'ll, '_, &'ll Value>,
     val: &'ll Value
 ) -> &'ll Value {
-    if bx.cx().val_ty(val) == bx.cx().i1() {
-        bx.zext(val, bx.cx().i8())
+    if bx.cx().val_ty(val) == bx.cx().type_i1() {
+        bx.zext(val, bx.cx().type_i8())
     } else {
         val
     }
@@ -417,7 +417,7 @@ pub fn to_immediate_scalar(
     scalar: &layout::Scalar,
 ) -> &'ll Value {
     if scalar.is_bool() {
-        return bx.trunc(val, bx.cx().i1());
+        return bx.trunc(val, bx.cx().type_i1());
     }
     val
 }
@@ -434,13 +434,13 @@ pub fn call_memcpy<'a, 'll: 'a, 'tcx: 'll>(
     if flags.contains(MemFlags::NONTEMPORAL) {
         // HACK(nox): This is inefficient but there is no nontemporal memcpy.
         let val = bx.load(src, src_align);
-        let ptr = bx.pointercast(dst, bx.cx().ptr_to(bx.cx().val_ty(val)));
+        let ptr = bx.pointercast(dst, bx.cx().type_ptr_to(bx.cx().val_ty(val)));
         bx.store_with_flags(val, ptr, dst_align, flags);
         return;
     }
     let cx = bx.cx();
-    let src_ptr = bx.pointercast(src, cx.i8p());
-    let dst_ptr = bx.pointercast(dst, cx.i8p());
+    let src_ptr = bx.pointercast(src, cx.type_i8p());
+    let dst_ptr = bx.pointercast(dst, cx.type_i8p());
     let size = bx.intcast(n_bytes, cx.isize_ty, false);
     let volatile = flags.contains(MemFlags::VOLATILE);
     bx.memcpy(dst_ptr, dst_align.abi(), src_ptr, src_align.abi(), size, volatile);
@@ -551,7 +551,7 @@ fn create_entry_fn(
         use_start_lang_item: bool,
     ) {
         let llfty =
-            cx.func(&[cx.t_int(), cx.ptr_to(cx.i8p())], cx.t_int());
+            cx.type_func(&[cx.type_int(), cx.type_ptr_to(cx.type_i8p())], cx.type_int());
 
         let main_ret_ty = cx.tcx.fn_sig(rust_main_def_id).output();
         // Given that `main()` has no arguments,
@@ -594,7 +594,7 @@ fn create_entry_fn(
                 start_def_id,
                 cx.tcx.intern_substs(&[main_ret_ty.into()]),
             );
-            (start_fn, vec![bx.pointercast(rust_main, cx.ptr_to(cx.i8p())),
+            (start_fn, vec![bx.pointercast(rust_main, cx.type_ptr_to(cx.type_i8p())),
                             arg_argc, arg_argv])
         } else {
             debug!("using user-defined start fn");
@@ -602,7 +602,7 @@ fn create_entry_fn(
         };
 
         let result = bx.call(start_fn, &args, None);
-        bx.ret(bx.intcast(result, cx.t_int(), true));
+        bx.ret(bx.intcast(result, cx.type_int(), true));
     }
 }
 
@@ -1151,7 +1151,10 @@ fn module_codegen<'a, 'tcx>(
             if !cx.used_statics.borrow().is_empty() {
                 let name = const_cstr!("llvm.used");
                 let section = const_cstr!("llvm.metadata");
-                let array = cx.const_array(&cx.ptr_to(cx.i8()), &*cx.used_statics.borrow());
+                let array = cx.const_array(
+                    &cx.type_ptr_to(cx.type_i8()),
+                    &*cx.used_statics.borrow()
+                );
 
                 unsafe {
                     let g = llvm::LLVMAddGlobal(cx.llmod,
index ef7f29ac7ddc8d23e6fccef94ee94bb983b6bdd3..e55c942f606a25b31b5777e94747a8a4cbcc5f9c 100644 (file)
@@ -765,7 +765,7 @@ fn inline_asm_call(&self, asm: *const c_char, cons: *const c_char,
         }).collect::<Vec<_>>();
 
         debug!("Asm Output Type: {:?}", output);
-        let fty = &self.cx().func(&argtys[..], output);
+        let fty = &self.cx().type_func(&argtys[..], output);
         unsafe {
             // Ask LLVM to verify that the constraints are well-formed.
             let constraints_ok = llvm::LLVMRustInlineAsmVerify(fty, cons);
@@ -861,9 +861,9 @@ fn shuffle_vector(&self, v1: &'ll Value, v2: &'ll Value, mask: &'ll Value) -> &'
     fn vector_splat(&self, num_elts: usize, elt: &'ll Value) -> &'ll Value {
         unsafe {
             let elt_ty = self.cx.val_ty(elt);
-            let undef = llvm::LLVMGetUndef(&self.cx().vector(elt_ty, num_elts as u64));
+            let undef = llvm::LLVMGetUndef(&self.cx().type_vector(elt_ty, num_elts as u64));
             let vec = self.insert_element(undef, elt, self.cx.const_i32(0));
-            let vec_i32_ty = &self.cx().vector(&self.cx().i32(), num_elts as u64);
+            let vec_i32_ty = &self.cx().type_vector(&self.cx().type_i32(), num_elts as u64);
             self.shuffle_vector(vec, undef, self.cx().const_null(vec_i32_ty))
         }
     }
@@ -1142,9 +1142,9 @@ fn check_store<'b>(&self,
                        ptr: &'ll Value) -> &'ll Value {
         let dest_ptr_ty = self.cx.val_ty(ptr);
         let stored_ty = self.cx.val_ty(val);
-        let stored_ptr_ty = self.cx.ptr_to(stored_ty);
+        let stored_ptr_ty = self.cx.type_ptr_to(stored_ty);
 
-        assert_eq!(self.cx.kind(dest_ptr_ty), llvm::TypeKind::Pointer);
+        assert_eq!(self.cx.type_kind(dest_ptr_ty), llvm::TypeKind::Pointer);
 
         if dest_ptr_ty == stored_ptr_ty {
             ptr
@@ -1163,14 +1163,14 @@ fn check_call<'b>(&self,
                       args: &'b [&'ll Value]) -> Cow<'b, [&'ll Value]> {
         let mut fn_ty = self.cx.val_ty(llfn);
         // Strip off pointers
-        while self.cx.kind(fn_ty) == llvm::TypeKind::Pointer {
+        while self.cx.type_kind(fn_ty) == llvm::TypeKind::Pointer {
             fn_ty = self.cx.element_type(fn_ty);
         }
 
-        assert!(self.cx.kind(fn_ty) == llvm::TypeKind::Function,
+        assert!(self.cx.type_kind(fn_ty) == llvm::TypeKind::Function,
                 "builder::{} not passed a function, but {:?}", typ, fn_ty);
 
-        let param_tys = self.cx.func_params(fn_ty);
+        let param_tys = self.cx.func_params_types(fn_ty);
 
         let all_args_match = param_tys.iter()
             .zip(args.iter().map(|&v| self.cx().val_ty(v)))
@@ -1227,7 +1227,7 @@ fn call_lifetime_intrinsic(&self, intrinsic: &str, ptr: &'ll Value, size: Size)
 
         let lifetime_intrinsic = self.cx.get_intrinsic(intrinsic);
 
-        let ptr = self.pointercast(ptr, self.cx.i8p());
+        let ptr = self.pointercast(ptr, self.cx.type_i8p());
         self.call(lifetime_intrinsic, &[self.cx.const_u64(size), ptr], None);
     }
 
index db4d732ff142720c4d43d614dcefa4082f8a360d..fb049b0c9a8bf8bc97759d6a8d4489e7370d4d07 100644 (file)
@@ -236,19 +236,19 @@ fn const_uint_big(&self, t: &'ll Type, u: u128) -> &'ll Value {
     }
 
     fn const_bool(&self, val: bool) -> &'ll Value {
-        &self.const_uint(&self.i1(), val as u64)
+        &self.const_uint(&self.type_i1(), val as u64)
     }
 
     fn const_i32(&self, i: i32) -> &'ll Value {
-        &self.const_int(&self.i32(), i as i64)
+        &self.const_int(&self.type_i32(), i as i64)
     }
 
     fn const_u32(&self, i: u32) -> &'ll Value {
-        &self.const_uint(&self.i32(), i as u64)
+        &self.const_uint(&self.type_i32(), i as u64)
     }
 
     fn const_u64(&self, i: u64) -> &'ll Value {
-        &self.const_uint(&self.i64(), i)
+        &self.const_uint(&self.type_i64(), i)
     }
 
     fn const_usize(&self, i: u64) -> &'ll Value {
@@ -262,7 +262,7 @@ fn const_usize(&self, i: u64) -> &'ll Value {
     }
 
     fn const_u8(&self, i: u8) -> &'ll Value {
-        &self.const_uint(&self.i8(), i as u64)
+        &self.const_uint(&self.type_i8(), i as u64)
     }
 
 
@@ -300,7 +300,7 @@ fn const_cstr(
     fn const_str_slice(&self, s: LocalInternedString) -> &'ll Value {
         let len = s.len();
         let cs = consts::ptrcast(&self.const_cstr(s, false),
-            &self.ptr_to(&self.layout_of(&self.tcx.mk_str()).llvm_type(&self)));
+            &self.type_ptr_to(&self.layout_of(&self.tcx.mk_str()).llvm_type(&self)));
         &self.const_fat_ptr(cs, &self.const_usize(len as u64))
     }
 
@@ -505,7 +505,7 @@ pub fn shift_mask_val(
     mask_llty: &'ll Type,
     invert: bool
 ) -> &'ll Value {
-    let kind = bx.cx().kind(llty);
+    let kind = bx.cx().type_kind(llty);
     match kind {
         TypeKind::Integer => {
             // i8/u8 can shift by at most 7, i16/u16 by at most 15, etc.
index 49f30d6757494677c6f3a44d01c2fbc47352bbfa..2bc9ab084352c0feb61704618eae1bf758e5c04f 100644 (file)
@@ -313,8 +313,8 @@ pub fn codegen_static<'a, 'tcx>(
         // boolean SSA values are i1, but they have to be stored in i8 slots,
         // otherwise some LLVM optimization passes don't work as expected
         let mut val_llty = cx.val_ty(v);
-        let v = if val_llty == cx.i1() {
-            val_llty = cx.i8();
+        let v = if val_llty == cx.type_i1() {
+            val_llty = cx.type_i8();
             llvm::LLVMConstZExt(v, val_llty)
         } else {
             v
@@ -432,7 +432,7 @@ pub fn codegen_static<'a, 'tcx>(
 
         if attrs.flags.contains(CodegenFnAttrFlags::USED) {
             // This static will be stored in the llvm.used variable which is an array of i8*
-            let cast = llvm::LLVMConstPointerCast(g, cx.i8p());
+            let cast = llvm::LLVMConstPointerCast(g, cx.type_i8p());
             cx.used_statics.borrow_mut().push(cast);
         }
     }
index e19143efa984fca7e5372057de83637da58dd4aa..1c25cec35ccece177d72bbba6fee4dbcd7c9426e 100644 (file)
@@ -380,7 +380,7 @@ pub fn eh_personality(&self) -> &'b Value {
                 } else {
                     "rust_eh_personality"
                 };
-                let fty = &self.variadic_func(&[], &self.i32());
+                let fty = &self.type_variadic_func(&[], &self.type_i32());
                 declare::declare_cfn(self, name, fty)
             }
         };
@@ -488,7 +488,7 @@ fn declare_intrinsic(
     macro_rules! ifn {
         ($name:expr, fn() -> $ret:expr) => (
             if key == $name {
-                let f = declare::declare_cfn(cx, $name, cx.func(&[], $ret));
+                let f = declare::declare_cfn(cx, $name, cx.type_func(&[], $ret));
                 llvm::SetUnnamedAddr(f, false);
                 cx.intrinsics.borrow_mut().insert($name, f.clone());
                 return Some(f);
@@ -496,7 +496,7 @@ macro_rules! ifn {
         );
         ($name:expr, fn(...) -> $ret:expr) => (
             if key == $name {
-                let f = declare::declare_cfn(cx, $name, cx.variadic_func(&[], $ret));
+                let f = declare::declare_cfn(cx, $name, cx.type_variadic_func(&[], $ret));
                 llvm::SetUnnamedAddr(f, false);
                 cx.intrinsics.borrow_mut().insert($name, f.clone());
                 return Some(f);
@@ -504,7 +504,7 @@ macro_rules! ifn {
         );
         ($name:expr, fn($($arg:expr),*) -> $ret:expr) => (
             if key == $name {
-                let f = declare::declare_cfn(cx, $name, cx.func(&[$($arg),*], $ret));
+                let f = declare::declare_cfn(cx, $name, cx.type_func(&[$($arg),*], $ret));
                 llvm::SetUnnamedAddr(f, false);
                 cx.intrinsics.borrow_mut().insert($name, f.clone());
                 return Some(f);
@@ -512,28 +512,28 @@ macro_rules! ifn {
         );
     }
     macro_rules! mk_struct {
-        ($($field_ty:expr),*) => (cx.struct_( &[$($field_ty),*], false))
+        ($($field_ty:expr),*) => (cx.type_struct( &[$($field_ty),*], false))
     }
 
-    let i8p = cx.i8p();
-    let void = cx.void();
-    let i1 = cx.i1();
-    let t_i8 = cx.i8();
-    let t_i16 = cx.i16();
-    let t_i32 = cx.i32();
-    let t_i64 = cx.i64();
-    let t_i128 = cx.i128();
-    let t_f32 = cx.f32();
-    let t_f64 = cx.f64();
-
-    let t_v2f32 = cx.vector(t_f32, 2);
-    let t_v4f32 = cx.vector(t_f32, 4);
-    let t_v8f32 = cx.vector(t_f32, 8);
-    let t_v16f32 = cx.vector(t_f32, 16);
-
-    let t_v2f64 = cx.vector(t_f64, 2);
-    let t_v4f64 = cx.vector(t_f64, 4);
-    let t_v8f64 = cx.vector(t_f64, 8);
+    let i8p = cx.type_i8p();
+    let void = cx.type_void();
+    let i1 = cx.type_i1();
+    let t_i8 = cx.type_i8();
+    let t_i16 = cx.type_i16();
+    let t_i32 = cx.type_i32();
+    let t_i64 = cx.type_i64();
+    let t_i128 = cx.type_i128();
+    let t_f32 = cx.type_f32();
+    let t_f64 = cx.type_f64();
+
+    let t_v2f32 = cx.type_vector(t_f32, 2);
+    let t_v4f32 = cx.type_vector(t_f32, 4);
+    let t_v8f32 = cx.type_vector(t_f32, 8);
+    let t_v16f32 = cx.type_vector(t_f32, 16);
+
+    let t_v2f64 = cx.type_vector(t_f64, 2);
+    let t_v4f64 = cx.type_vector(t_f64, 4);
+    let t_v8f64 = cx.type_vector(t_f64, 8);
 
     ifn!("llvm.memset.p0i8.i16", fn(i8p, t_i8, t_i16, t_i32, i1) -> void);
     ifn!("llvm.memset.p0i8.i32", fn(i8p, t_i8, t_i32, t_i32, i1) -> void);
@@ -786,8 +786,8 @@ macro_rules! mk_struct {
     ifn!("llvm.prefetch", fn(i8p, t_i32, t_i32, t_i32) -> void);
 
     if cx.sess().opts.debuginfo != DebugInfo::None {
-        ifn!("llvm.dbg.declare", fn(cx.metadata(), cx.metadata()) -> void);
-        ifn!("llvm.dbg.value", fn(cx.metadata(), t_i64, cx.metadata()) -> void);
+        ifn!("llvm.dbg.declare", fn(cx.type_metadata(), cx.type_metadata()) -> void);
+        ifn!("llvm.dbg.value", fn(cx.type_metadata(), t_i64, cx.type_metadata()) -> void);
     }
 
     None
index f7e4d80d109e92eaec2fbf2094b80a27e84f1378..a2df65b73b1cf965f11681975e7f4a6fe705a0b6 100644 (file)
@@ -55,7 +55,7 @@ pub fn get_or_insert_gdb_debug_scripts_section_global(cx: &CodegenCx<'ll, '_>)
         let section_contents = b"\x01gdb_load_rust_pretty_printers.py\0";
 
         unsafe {
-            let llvm_type = cx.array(cx.i8(),
+            let llvm_type = cx.type_array(cx.type_i8(),
                                         section_contents.len() as u64);
 
             let section_var = declare::define_global(cx, section_var_name,
index 7a2080e1a2671c5a174a601c559f544e1ccbe9be..31022140519b72bf6b2365cbb216ae6abcb3486c 100644 (file)
 use super::backend::Backend;
 
 pub trait TypeMethods : Backend {
-    fn void(&self) -> Self::Type;
-    fn metadata(&self) -> Self::Type;
-    fn i1(&self) -> Self::Type;
-    fn i8(&self) -> Self::Type;
-    fn i16(&self) -> Self::Type;
-    fn i32(&self) -> Self::Type;
-    fn i64(&self) -> Self::Type;
-    fn i128(&self) -> Self::Type;
-    fn ix(&self, num_bites: u64) -> Self::Type;
-    fn f32(&self) -> Self::Type;
-    fn f64(&self) -> Self::Type;
-    fn x86_mmx(&self) -> Self::Type;
+    fn type_void(&self) -> Self::Type;
+    fn type_metadata(&self) -> Self::Type;
+    fn type_i1(&self) -> Self::Type;
+    fn type_i8(&self) -> Self::Type;
+    fn type_i16(&self) -> Self::Type;
+    fn type_i32(&self) -> Self::Type;
+    fn type_i64(&self) -> Self::Type;
+    fn type_i128(&self) -> Self::Type;
+    fn type_ix(&self, num_bites: u64) -> Self::Type;
+    fn type_f32(&self) -> Self::Type;
+    fn type_f64(&self) -> Self::Type;
+    fn type_x86_mmx(&self) -> Self::Type;
 
-    fn func(&self, args: &[Self::Type], ret: Self::Type) -> Self::Type;
-    fn variadic_func(&self, args: &[Self::Type], ret: Self::Type) -> Self::Type;
-    fn struct_(&self, els: &[Self::Type], packed: bool) -> Self::Type;
-    fn named_struct(&self, name: &str) -> Self::Type;
-    fn array(&self, ty: Self::Type, len: u64) -> Self::Type;
-    fn vector(&self, ty: Self::Type, len: u64) -> Self::Type;
-    fn kind(&self, ty: Self::Type) -> Self::TypeKind;
+    fn type_func(&self, args: &[Self::Type], ret: Self::Type) -> Self::Type;
+    fn type_variadic_func(&self, args: &[Self::Type], ret: Self::Type) -> Self::Type;
+    fn type_struct(&self, els: &[Self::Type], packed: bool) -> Self::Type;
+    fn type_named_struct(&self, name: &str) -> Self::Type;
+    fn type_array(&self, ty: Self::Type, len: u64) -> Self::Type;
+    fn type_vector(&self, ty: Self::Type, len: u64) -> Self::Type;
+    fn type_kind(&self, ty: Self::Type) -> Self::TypeKind;
     fn set_struct_body(&self, ty: Self::Type, els: &[Self::Type], packed: bool);
-    fn ptr_to(&self, ty: Self::Type) -> Self::Type;
+    fn type_ptr_to(&self, ty: Self::Type) -> Self::Type;
     fn element_type(&self, ty: Self::Type) -> Self::Type;
     fn vector_length(&self, ty: Self::Type) -> usize;
-    fn func_params(&self, ty: Self::Type) -> Vec<Self::Type>;
+    fn func_params_types(&self, ty: Self::Type) -> Vec<Self::Type>;
     fn float_width(&self, ty: Self::Type) -> usize;
     fn int_width(&self, ty: Self::Type) -> u64;
 }
index be03dc5e143309eb2a7a5ae43582a03bb53102d1..0af014ac681daac823d9219f9b1d91ac277f5953 100644 (file)
@@ -252,7 +252,7 @@ pub fn codegen_intrinsic_call(
             let tp_ty = substs.type_at(0);
             let mut ptr = args[0].immediate();
             if let PassMode::Cast(ty) = fn_ty.ret.mode {
-                ptr = bx.pointercast(ptr, bx.cx().ptr_to(ty.llvm_type(cx)));
+                ptr = bx.pointercast(ptr, bx.cx().type_ptr_to(ty.llvm_type(cx)));
             }
             let load = bx.volatile_load(ptr);
             let align = if name == "unaligned_volatile_load" {
@@ -338,7 +338,7 @@ pub fn codegen_intrinsic_call(
                                 args[1].immediate()
                             ], None);
                             let val = bx.extract_value(pair, 0);
-                            let overflow = bx.zext(bx.extract_value(pair, 1), cx.bool());
+                            let overflow = bx.zext(bx.extract_value(pair, 1), cx.type_bool());
 
                             let dest = result.project_field(bx, 0);
                             bx.store(val, dest.llval, dest.align);
@@ -388,7 +388,7 @@ pub fn codegen_intrinsic_call(
                             } else {
                                 // rotate_left: (X << (S % BW)) | (X >> ((BW - S) % BW))
                                 // rotate_right: (X << ((BW - S) % BW)) | (X >> (S % BW))
-                                let width = cx.const_uint(cx.ix(width), width);
+                                let width = cx.const_uint(cx.type_ix(width), width);
                                 let shift = bx.urem(raw_shift, width);
                                 let inv_shift = bx.urem(bx.sub(width, raw_shift), width);
                                 let shift1 = bx.shl(val, if is_left { shift } else { inv_shift });
@@ -495,7 +495,7 @@ pub fn codegen_intrinsic_call(
                             failorder,
                             weak);
                         let val = bx.extract_value(pair, 0);
-                        let success = bx.zext(bx.extract_value(pair, 1), bx.cx().bool());
+                        let success = bx.zext(bx.extract_value(pair, 1), bx.cx().type_bool());
 
                         let dest = result.project_field(bx, 0);
                         bx.store(val, dest.llval, dest.align);
@@ -582,32 +582,32 @@ fn one<T>(x: Vec<T>) -> T {
             fn ty_to_type(cx: &CodegenCx<'ll, '_>, t: &intrinsics::Type) -> Vec<&'ll Type> {
                 use intrinsics::Type::*;
                 match *t {
-                    Void => vec![cx.void()],
+                    Void => vec![cx.type_void()],
                     Integer(_signed, _width, llvm_width) => {
-                        vec![cx.ix( llvm_width as u64)]
+                        vec![cx.type_ix( llvm_width as u64)]
                     }
                     Float(x) => {
                         match x {
-                            32 => vec![cx.f32()],
-                            64 => vec![cx.f64()],
+                            32 => vec![cx.type_f32()],
+                            64 => vec![cx.type_f64()],
                             _ => bug!()
                         }
                     }
                     Pointer(ref t, ref llvm_elem, _const) => {
                         let t = llvm_elem.as_ref().unwrap_or(t);
                         let elem = one(ty_to_type(cx, t));
-                        vec![cx.ptr_to(elem)]
+                        vec![cx.type_ptr_to(elem)]
                     }
                     Vector(ref t, ref llvm_elem, length) => {
                         let t = llvm_elem.as_ref().unwrap_or(t);
                         let elem = one(ty_to_type(cx, t));
-                        vec![cx.vector(elem, length as u64)]
+                        vec![cx.type_vector(elem, length as u64)]
                     }
                     Aggregate(false, ref contents) => {
                         let elems = contents.iter()
                                             .map(|t| one(ty_to_type(cx, t)))
                                             .collect::<Vec<_>>();
-                        vec![cx.struct_( &elems, false)]
+                        vec![cx.type_struct( &elems, false)]
                     }
                     Aggregate(true, ref contents) => {
                         contents.iter()
@@ -646,20 +646,20 @@ fn modify_as_needed(
                     }
                     intrinsics::Type::Pointer(_, Some(ref llvm_elem), _) => {
                         let llvm_elem = one(ty_to_type(bx.cx(), llvm_elem));
-                        vec![bx.pointercast(arg.immediate(), bx.cx().ptr_to(llvm_elem))]
+                        vec![bx.pointercast(arg.immediate(), bx.cx().type_ptr_to(llvm_elem))]
                     }
                     intrinsics::Type::Vector(_, Some(ref llvm_elem), length) => {
                         let llvm_elem = one(ty_to_type(bx.cx(), llvm_elem));
                         vec![
                             bx.bitcast(arg.immediate(),
-                            bx.cx().vector(llvm_elem, length as u64))
+                            bx.cx().type_vector(llvm_elem, length as u64))
                         ]
                     }
                     intrinsics::Type::Integer(_, width, llvm_width) if width != llvm_width => {
                         // the LLVM intrinsic uses a smaller integer
                         // size than the C intrinsic's signature, so
                         // we have to trim it down here.
-                        vec![bx.trunc(arg.immediate(), bx.cx().ix(llvm_width as u64))]
+                        vec![bx.trunc(arg.immediate(), bx.cx().type_ix(llvm_width as u64))]
                     }
                     _ => vec![arg.immediate()],
                 }
@@ -681,7 +681,7 @@ fn modify_as_needed(
                 intrinsics::IntrinsicDef::Named(name) => {
                     let f = declare::declare_cfn(cx,
                                                  name,
-                                                 cx.func(&inputs, outputs));
+                                                 cx.type_func(&inputs, outputs));
                     bx.call(f, &llargs, None)
                 }
             };
@@ -705,7 +705,7 @@ fn modify_as_needed(
 
     if !fn_ty.ret.is_ignore() {
         if let PassMode::Cast(ty) = fn_ty.ret.mode {
-            let ptr = bx.pointercast(result.llval, cx.ptr_to(ty.llvm_type(cx)));
+            let ptr = bx.pointercast(result.llval, cx.type_ptr_to(ty.llvm_type(cx)));
             bx.store(llval, ptr, result.align);
         } else {
             OperandRef::from_immediate_or_packed_pair(bx, llval, result.layout)
@@ -727,8 +727,8 @@ fn copy_intrinsic(
     let (size, align) = cx.size_and_align_of(ty);
     let size = cx.const_usize(size.bytes());
     let align = align.abi();
-    let dst_ptr = bx.pointercast(dst, cx.i8p());
-    let src_ptr = bx.pointercast(src, cx.i8p());
+    let dst_ptr = bx.pointercast(dst, cx.type_i8p());
+    let src_ptr = bx.pointercast(src, cx.type_i8p());
     if allow_overlap {
         bx.memmove(dst_ptr, align, src_ptr, align, bx.mul(size, count), volatile)
     } else {
@@ -748,7 +748,7 @@ fn memset_intrinsic(
     let (size, align) = cx.size_and_align_of(ty);
     let size = cx.const_usize(size.bytes());
     let align = cx.const_i32(align.abi() as i32);
-    let dst = bx.pointercast(dst, cx.i8p());
+    let dst = bx.pointercast(dst, cx.type_i8p());
     call_memset(bx, dst, val, bx.mul(size, count), align, volatile)
 }
 
@@ -763,7 +763,7 @@ fn try_intrinsic(
     if bx.sess().no_landing_pads() {
         bx.call(func, &[data], None);
         let ptr_align = bx.tcx().data_layout.pointer_align;
-        bx.store(cx.const_null(cx.i8p()), dest, ptr_align);
+        bx.store(cx.const_null(cx.type_i8p()), dest, ptr_align);
     } else if wants_msvc_seh(bx.sess()) {
         codegen_msvc_try(bx, cx, func, data, local_ptr, dest);
     } else {
@@ -839,7 +839,7 @@ fn codegen_msvc_try(
         //      }
         //
         // More information can be found in libstd's seh.rs implementation.
-        let i64p = cx.ptr_to(cx.i64());
+        let i64p = cx.type_ptr_to(cx.type_i64());
         let ptr_align = bx.tcx().data_layout.pointer_align;
         let slot = bx.alloca(i64p, "slot", ptr_align);
         bx.invoke(func, &[data], normal.llbb(), catchswitch.llbb(), None);
@@ -930,12 +930,12 @@ fn codegen_gnu_try(
         // being thrown.  The second value is a "selector" indicating which of
         // the landing pad clauses the exception's type had been matched to.
         // rust_try ignores the selector.
-        let lpad_ty = cx.struct_(&[cx.i8p(), cx.i32()], false);
+        let lpad_ty = cx.type_struct(&[cx.type_i8p(), cx.type_i32()], false);
         let vals = catch.landing_pad(lpad_ty, bx.cx().eh_personality(), 1);
-        catch.add_clause(vals, bx.cx().const_null(cx.i8p()));
+        catch.add_clause(vals, bx.cx().const_null(cx.type_i8p()));
         let ptr = catch.extract_value(vals, 0);
         let ptr_align = bx.tcx().data_layout.pointer_align;
-        catch.store(ptr, catch.bitcast(local_ptr, cx.ptr_to(cx.i8p())), ptr_align);
+        catch.store(ptr, catch.bitcast(local_ptr, cx.type_ptr_to(cx.type_i8p())), ptr_align);
         catch.ret(cx.const_i32(1));
     });
 
@@ -1078,7 +1078,7 @@ macro_rules! require_simd {
                   found `{}` with length {}",
                  in_len, in_ty,
                  ret_ty, out_len);
-        require!(bx.cx().kind(bx.cx().element_type(llret_ty)) == TypeKind::Integer,
+        require!(bx.cx().type_kind(bx.cx().element_type(llret_ty)) == TypeKind::Integer,
                  "expected return type with integer elements, found `{}` with non-integer `{}`",
                  ret_ty,
                  ret_ty.simd_type(tcx));
@@ -1167,8 +1167,8 @@ macro_rules! require_simd {
             _ => return_error!("mask element type is `{}`, expected `i_`", m_elem_ty)
         }
         // truncate the mask to a vector of i1s
-        let i1 = bx.cx().i1();
-        let i1xn = bx.cx().vector(i1, m_len as u64);
+        let i1 = bx.cx().type_i1();
+        let i1xn = bx.cx().type_vector(i1, m_len as u64);
         let m_i1s = bx.trunc(args[0].immediate(), i1xn);
         return Ok(bx.select(m_i1s, args[1].immediate(), args[2].immediate()));
     }
@@ -1300,16 +1300,16 @@ fn llvm_vector_ty(cx: &CodegenCx<'ll, '_>, elem_ty: ty::Ty, vec_len: usize,
                       mut no_pointers: usize) -> &'ll Type {
         // FIXME: use cx.layout_of(ty).llvm_type() ?
         let mut elem_ty = match elem_ty.sty {
-            ty::Int(v) => cx.int_from_ty( v),
-            ty::Uint(v) => cx.uint_from_ty( v),
-            ty::Float(v) => cx.float_from_ty( v),
+            ty::Int(v) => cx.type_int_from_ty( v),
+            ty::Uint(v) => cx.type_uint_from_ty( v),
+            ty::Float(v) => cx.type_float_from_ty( v),
             _ => unreachable!(),
         };
         while no_pointers > 0 {
-            elem_ty = cx.ptr_to(elem_ty);
+            elem_ty = cx.type_ptr_to(elem_ty);
             no_pointers -= 1;
         }
-        cx.vector(elem_ty, vec_len as u64)
+        cx.type_vector(elem_ty, vec_len as u64)
     }
 
 
@@ -1386,13 +1386,13 @@ fn non_ptr(t: ty::Ty) -> ty::Ty {
         }
 
         // Alignment of T, must be a constant integer value:
-        let alignment_ty = bx.cx().i32();
+        let alignment_ty = bx.cx().type_i32();
         let alignment = bx.cx().const_i32(bx.cx().align_of(in_elem).abi() as i32);
 
         // Truncate the mask vector to a vector of i1s:
         let (mask, mask_ty) = {
-            let i1 = bx.cx().i1();
-            let i1xn = bx.cx().vector(i1, in_len as u64);
+            let i1 = bx.cx().type_i1();
+            let i1xn = bx.cx().type_vector(i1, in_len as u64);
             (bx.trunc(args[2].immediate(), i1xn), i1xn)
         };
 
@@ -1407,7 +1407,7 @@ fn non_ptr(t: ty::Ty) -> ty::Ty {
         let llvm_intrinsic = format!("llvm.masked.gather.{}.{}",
                                      llvm_elem_vec_str, llvm_pointer_vec_str);
         let f = declare::declare_cfn(bx.cx(), &llvm_intrinsic,
-                                     bx.cx().func(&[
+                                     bx.cx().type_func(&[
                                          llvm_pointer_vec_ty,
                                          alignment_ty,
                                          mask_ty,
@@ -1486,17 +1486,17 @@ fn non_ptr(t: ty::Ty) -> ty::Ty {
         }
 
         // Alignment of T, must be a constant integer value:
-        let alignment_ty = bx.cx().i32();
+        let alignment_ty = bx.cx().type_i32();
         let alignment = bx.cx().const_i32(bx.cx().align_of(in_elem).abi() as i32);
 
         // Truncate the mask vector to a vector of i1s:
         let (mask, mask_ty) = {
-            let i1 = bx.cx().i1();
-            let i1xn = bx.cx().vector(i1, in_len as u64);
+            let i1 = bx.cx().type_i1();
+            let i1xn = bx.cx().type_vector(i1, in_len as u64);
             (bx.trunc(args[2].immediate(), i1xn), i1xn)
         };
 
-        let ret_t = bx.cx().void();
+        let ret_t = bx.cx().type_void();
 
         // Type of the vector of pointers:
         let llvm_pointer_vec_ty = llvm_vector_ty(bx.cx(), underlying_ty, in_len, pointer_count);
@@ -1509,7 +1509,7 @@ fn non_ptr(t: ty::Ty) -> ty::Ty {
         let llvm_intrinsic = format!("llvm.masked.scatter.{}.{}",
                                      llvm_elem_vec_str, llvm_pointer_vec_str);
         let f = declare::declare_cfn(bx.cx(), &llvm_intrinsic,
-                                     bx.cx().func(&[llvm_elem_vec_ty,
+                                     bx.cx().type_func(&[llvm_elem_vec_ty,
                                                   llvm_pointer_vec_ty,
                                                   alignment_ty,
                                                   mask_ty], ret_t));
@@ -1565,8 +1565,8 @@ macro_rules! arith_red {
                         } else {
                             // unordered arithmetic reductions do not:
                             match f.bit_width() {
-                                32 => bx.cx().const_undef(bx.cx().f32()),
-                                64 => bx.cx().const_undef(bx.cx().f64()),
+                                32 => bx.cx().const_undef(bx.cx().type_f32()),
+                                64 => bx.cx().const_undef(bx.cx().type_f64()),
                                 v => {
                                     return_error!(r#"
 unsupported {} from `{}` with element `{}` of size `{}` to `{}`"#,
@@ -1643,8 +1643,8 @@ macro_rules! bitwise_red {
                     }
 
                     // boolean reductions operate on vectors of i1s:
-                    let i1 = bx.cx().i1();
-                    let i1xn = bx.cx().vector(i1, in_len as u64);
+                    let i1 = bx.cx().type_i1();
+                    let i1xn = bx.cx().type_vector(i1, in_len as u64);
                     bx.trunc(args[0].immediate(), i1xn)
                 };
                 return match in_elem.sty {
@@ -1654,7 +1654,7 @@ macro_rules! bitwise_red {
                             if !$boolean {
                                 r
                             } else {
-                                bx.zext(r, bx.cx().bool())
+                                bx.zext(r, bx.cx().type_bool())
                             }
                         )
                     },
index 7f76b9b1efa5499ce40dad2489b73170ad44d04a..73c220dbfda271960bffe3b4f67731e4d8b27ca2 100644 (file)
@@ -42,7 +42,7 @@ pub fn get_fn(self, bx: &Builder<'a, 'll, 'tcx>,
 
         let llvtable = bx.pointercast(
             llvtable,
-            bx.cx().ptr_to(fn_ty.ptr_to_llvm_type(bx.cx()))
+            bx.cx().type_ptr_to(fn_ty.ptr_to_llvm_type(bx.cx()))
         );
         let ptr_align = bx.tcx().data_layout.pointer_align;
         let ptr = bx.load(
@@ -63,7 +63,7 @@ pub fn get_usize(
         // Load the data pointer from the object.
         debug!("get_int({:?}, {:?})", llvtable, self);
 
-        let llvtable = bx.pointercast(llvtable, bx.cx().ptr_to(bx.cx().isize()));
+        let llvtable = bx.pointercast(llvtable, bx.cx().type_ptr_to(bx.cx().type_isize()));
         let usize_align = bx.tcx().data_layout.pointer_align;
         let ptr = bx.load(
             bx.inbounds_gep(llvtable, &[bx.cx().const_usize(self.0)]),
@@ -98,7 +98,7 @@ pub fn get_vtable(
     }
 
     // Not in the cache. Build it.
-    let nullptr = cx.const_null(cx.i8p());
+    let nullptr = cx.const_null(cx.type_i8p());
 
     let methods = tcx.vtable_methods(trait_ref.with_self_ty(tcx, ty));
     let methods = methods.iter().cloned().map(|opt_mth| {
index 27a0b9c5682bda59346ed2d9c6ba8916c1441865..781271ffaa705a106c25948d969b8f6bcfd7514b 100644 (file)
@@ -268,7 +268,7 @@ fn codegen_terminator(&mut self,
                             }
                         };
                         bx.load(
-                            bx.pointercast(llslot, bx.cx().ptr_to(cast_ty.llvm_type(bx.cx()))),
+                            bx.pointercast(llslot, bx.cx().type_ptr_to(cast_ty.llvm_type(bx.cx()))),
                             self.fn_ty.ret.layout.align)
                     }
                 };
@@ -560,7 +560,7 @@ fn codegen_terminator(&mut self,
                     let dest = match ret_dest {
                         _ if fn_ty.ret.is_indirect() => llargs[0],
                         ReturnDest::Nothing => {
-                            bx.cx().const_undef(bx.cx().ptr_to(fn_ty.ret.memory_ty(bx.cx())))
+                            bx.cx().const_undef(bx.cx().type_ptr_to(fn_ty.ret.memory_ty(bx.cx())))
                         }
                         ReturnDest::IndirectOperand(dst, _) |
                         ReturnDest::Store(dst) => dst.llval,
@@ -801,7 +801,7 @@ fn codegen_argument(&mut self,
         if by_ref && !arg.is_indirect() {
             // Have to load the argument, maybe while casting it.
             if let PassMode::Cast(ty) = arg.mode {
-                llval = bx.load(bx.pointercast(llval, bx.cx().ptr_to(ty.llvm_type(bx.cx()))),
+                llval = bx.load(bx.pointercast(llval, bx.cx().type_ptr_to(ty.llvm_type(bx.cx()))),
                                 align.min(arg.layout.align));
             } else {
                 // We can't use `PlaceRef::load` here because the argument
@@ -902,7 +902,7 @@ fn landing_pad_uncached(&mut self, target_bb: &'ll BasicBlock) -> &'ll BasicBloc
 
     fn landing_pad_type(&self) -> &'ll Type {
         let cx = self.cx;
-        cx.struct_( &[cx.i8p(), cx.i32()], false)
+        cx.type_struct( &[cx.type_i8p(), cx.type_i32()], false)
     }
 
     fn unreachable_block(&mut self) -> &'ll BasicBlock {
@@ -1014,7 +1014,7 @@ fn codegen_transmute_into(&mut self, bx: &Builder<'a, 'll, 'tcx>,
                               dst: PlaceRef<'tcx, &'ll Value>) {
         let src = self.codegen_operand(bx, src);
         let llty = src.layout.llvm_type(bx.cx());
-        let cast_ptr = bx.pointercast(dst.llval, bx.cx().ptr_to(llty));
+        let cast_ptr = bx.pointercast(dst.llval, bx.cx().type_ptr_to(llty));
         let align = src.layout.align.min(dst.layout.align);
         src.val.store(bx, PlaceRef::new_sized(cast_ptr, src.layout, align));
     }
index d3640c1934268e25484e975c2aba2447a83eb657..0731f27732cf9bd556f6041c3848d6ed0e6f45b2 100644 (file)
@@ -40,11 +40,11 @@ pub fn scalar_to_llvm(
     match cv {
         Scalar::Bits { size: 0, .. } => {
             assert_eq!(0, layout.value.size(cx).bytes());
-            cx.const_undef(cx.ix(0))
+            cx.const_undef(cx.type_ix(0))
         },
         Scalar::Bits { bits, size } => {
             assert_eq!(size as u64, layout.value.size(cx).bytes());
-            let llval = cx.const_uint_big(cx.ix(bitsize), bits);
+            let llval = cx.const_uint_big(cx.type_ix(bitsize), bits);
             if layout.value == layout::Pointer {
                 unsafe { llvm::LLVMConstIntToPtr(llval, llty) }
             } else {
@@ -72,7 +72,7 @@ pub fn scalar_to_llvm(
                 None => bug!("missing allocation {:?}", ptr.alloc_id),
             };
             let llval = unsafe { llvm::LLVMConstInBoundsGEP(
-                consts::bitcast(base_addr, cx.i8p()),
+                consts::bitcast(base_addr, cx.type_i8p()),
                 &cx.const_usize(ptr.offset.bytes()),
                 1,
             ) };
@@ -109,7 +109,7 @@ pub fn const_alloc_to_llvm(cx: &CodegenCx<'ll, '_>, alloc: &Allocation) -> &'ll
                 value: layout::Primitive::Pointer,
                 valid_range: 0..=!0
             },
-            cx.i8p()
+            cx.type_i8p()
         ));
         next_offset = offset + pointer_size;
     }
index b4880ed4888a7e4dd4bf512c809cc3a063da538f..4a31b4d3a4ba8dc9bac113ed0edb40855a2717e4 100644 (file)
@@ -419,7 +419,7 @@ fn create_funclets(
                 // C++ personality function, but `catch (...)` has no type so
                 // it's null. The 64 here is actually a bitfield which
                 // represents that this is a catch-all block.
-                let null = bx.cx().const_null(bx.cx().i8p());
+                let null = bx.cx().const_null(bx.cx().type_i8p());
                 let sixty_four = bx.cx().const_i32(64);
                 cleanup = cp_bx.catch_pad(cs, &[null, sixty_four, null]);
                 cp_bx.br(llbb);
index c2725acbf1234c020aed407e26cfe3665bb79fb7..be9107160dad2b24a70f536771fb34cc3eb63e08 100644 (file)
@@ -348,7 +348,7 @@ pub fn store_unsized(
 
         // Allocate an appropriate region on the stack, and copy the value into it
         let (llsize, _) = glue::size_and_align_of_dst(bx, unsized_ty, Some(llextra));
-        let lldst = bx.array_alloca(bx.cx().i8(), llsize, "unsized_tmp", max_align);
+        let lldst = bx.array_alloca(bx.cx().type_i8(), llsize, "unsized_tmp", max_align);
         base::call_memcpy(bx, lldst, max_align, llptr, min_align, llsize, flags);
 
         // Store the allocated region and the extra to the indirect place.
@@ -462,7 +462,7 @@ pub fn codegen_operand(&mut self,
                         // We've errored, so we don't have to produce working code.
                         let layout = bx.cx().layout_of(ty);
                         PlaceRef::new_sized(
-                            bx.cx().const_undef(bx.cx().ptr_to(layout.llvm_type(bx.cx()))),
+                            bx.cx().const_undef(bx.cx().type_ptr_to(layout.llvm_type(bx.cx()))),
                             layout,
                             layout.align,
                         ).load(bx)
index b0740008995ed7b75841305d8c81d5fd34a83e17..2570be8154e558233caef9b876422758b9d0fb9f 100644 (file)
@@ -67,11 +67,11 @@ pub fn from_const_alloc(
         let base_addr = consts::addr_of(bx.cx(), init, layout.align, None);
 
         let llval = unsafe { LLVMConstInBoundsGEP(
-            consts::bitcast(base_addr, bx.cx().i8p()),
+            consts::bitcast(base_addr, bx.cx().type_i8p()),
             &bx.cx().const_usize(offset.bytes()),
             1,
         )};
-        let llval = consts::bitcast(llval, bx.cx().ptr_to(layout.llvm_type(bx.cx())));
+        let llval = consts::bitcast(llval, bx.cx().type_ptr_to(layout.llvm_type(bx.cx())));
         PlaceRef::new_sized(llval, layout, alloc.align)
     }
 
@@ -159,7 +159,7 @@ pub fn load(&self, bx: &Builder<'a, 'll, 'tcx>) -> OperandRef<'tcx, &'ll Value>
                 let load = bx.load(llptr, self.align);
                 scalar_load_metadata(load, scalar);
                 if scalar.is_bool() {
-                    bx.trunc(load, bx.cx().i1())
+                    bx.trunc(load, bx.cx().type_i1())
                 } else {
                     load
                 }
@@ -196,7 +196,7 @@ pub fn project_field(
             };
             PlaceRef {
                 // HACK(eddyb) have to bitcast pointers until LLVM removes pointee types.
-                llval: bx.pointercast(llval, cx.ptr_to(field.llvm_type(cx))),
+                llval: bx.pointercast(llval, cx.type_ptr_to(field.llvm_type(cx))),
                 llextra: if cx.type_has_metadata(field.ty) {
                     self.llextra
                 } else {
@@ -265,7 +265,7 @@ pub fn project_field(
         debug!("struct_field_ptr: DST field offset: {:?}", offset);
 
         // Cast and adjust pointer
-        let byte_ptr = bx.pointercast(self.llval, cx.i8p());
+        let byte_ptr = bx.pointercast(self.llval, cx.type_i8p());
         let byte_ptr = bx.gep(byte_ptr, &[offset]);
 
         // Finally, cast back to the type expected
@@ -273,7 +273,7 @@ pub fn project_field(
         debug!("struct_field_ptr: Field type is {:?}", ll_fty);
 
         PlaceRef {
-            llval: bx.pointercast(byte_ptr, bx.cx().ptr_to(ll_fty)),
+            llval: bx.pointercast(byte_ptr, bx.cx().type_ptr_to(ll_fty)),
             llextra: self.llextra,
             layout: field,
             align: effective_field_align,
@@ -379,7 +379,10 @@ pub fn codegen_set_discr(&self, bx: &Builder<'a, 'll, 'tcx>, variant_index: Vari
                        bx.sess().target.target.arch == "aarch64" {
                         // Issue #34427: As workaround for LLVM bug on ARM,
                         // use memset of 0 before assigning niche value.
-                        let llptr = bx.pointercast(self.llval, bx.cx().ptr_to(bx.cx().i8()));
+                        let llptr = bx.pointercast(
+                            self.llval,
+                            bx.cx().type_ptr_to(bx.cx().type_i8())
+                        );
                         let fill_byte = bx.cx().const_u8(0);
                         let (size, align) = self.layout.size_and_align();
                         let size = bx.cx().const_usize(size.bytes());
@@ -422,7 +425,7 @@ pub fn project_downcast(&self, bx: &Builder<'a, 'll, 'tcx>, variant_index: Varia
 
         // Cast to the appropriate variant struct type.
         let variant_ty = downcast.layout.llvm_type(bx.cx());
-        downcast.llval = bx.pointercast(downcast.llval, bx.cx().ptr_to(variant_ty));
+        downcast.llval = bx.pointercast(downcast.llval, bx.cx().type_ptr_to(variant_ty));
 
         downcast
     }
@@ -483,7 +486,9 @@ pub fn codegen_place(&mut self,
                         // so we generate an abort
                         let fnname = bx.cx().get_intrinsic(&("llvm.trap"));
                         bx.call(fnname, &[], None);
-                        let llval = bx.cx().const_undef(bx.cx().ptr_to(layout.llvm_type(bx.cx())));
+                        let llval = bx.cx().const_undef(
+                            bx.cx().type_ptr_to(layout.llvm_type(bx.cx()))
+                        );
                         PlaceRef::new_sized(llval, layout, layout.align)
                     }
                 }
@@ -543,7 +548,7 @@ pub fn codegen_place(&mut self,
                         // Cast the place pointer type to the new
                         // array or slice type (*[%_; new_len]).
                         subslice.llval = bx.pointercast(subslice.llval,
-                            bx.cx().ptr_to(subslice.layout.llvm_type(bx.cx())));
+                            bx.cx().type_ptr_to(subslice.layout.llvm_type(bx.cx())));
 
                         subslice
                     }
index 4dcb0bec3f49fa42af762e9fd2a17f6fda981e4f..199bb474dd12bee0e5e7d6a30c5489f1baa3b5e3 100644 (file)
@@ -117,7 +117,7 @@ pub fn codegen_rvalue(&mut self,
 
                     // Use llvm.memset.p0i8.* to initialize byte arrays
                     let v = base::from_immediate(&bx, v);
-                    if bx.cx().val_ty(v) == bx.cx().i8() {
+                    if bx.cx().val_ty(v) == bx.cx().type_i8() {
                         base::call_memset(&bx, start, v, size, align, false);
                         return bx;
                     }
index d4b137926d516a1f0dc39bcc4d7c708d11a4cd15..1ef1417ab4ebfc93f9cabd0b28451552a29c7487 100644 (file)
@@ -43,82 +43,82 @@ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
 
 impl TypeMethods for CodegenCx<'ll, 'tcx> {
 
-    fn void(&self) -> &'ll Type {
+    fn type_void(&self) -> &'ll Type {
         unsafe {
             llvm::LLVMVoidTypeInContext(self.llcx)
         }
     }
 
-    fn metadata(&self) -> &'ll Type {
+    fn type_metadata(&self) -> &'ll Type {
         unsafe {
             llvm::LLVMRustMetadataTypeInContext(self.llcx)
         }
     }
 
-    fn i1(&self) -> &'ll Type {
+    fn type_i1(&self) -> &'ll Type {
         unsafe {
             llvm::LLVMInt1TypeInContext(self.llcx)
         }
     }
 
-    fn i8(&self) -> &'ll Type {
+    fn type_i8(&self) -> &'ll Type {
         unsafe {
             llvm::LLVMInt8TypeInContext(self.llcx)
         }
     }
 
 
-    fn i16(&self) -> &'ll Type {
+    fn type_i16(&self) -> &'ll Type {
         unsafe {
 
             llvm::LLVMInt16TypeInContext(self.llcx)
         }
     }
 
-    fn i32(&self) -> &'ll Type {
+    fn type_i32(&self) -> &'ll Type {
         unsafe {
             llvm::LLVMInt32TypeInContext(self.llcx)
         }
     }
 
-    fn i64(&self) -> &'ll Type {
+    fn type_i64(&self) -> &'ll Type {
         unsafe {
             llvm::LLVMInt64TypeInContext(self.llcx)
         }
     }
 
-    fn i128(&self) -> &'ll Type {
+    fn type_i128(&self) -> &'ll Type {
         unsafe {
             llvm::LLVMIntTypeInContext(self.llcx, 128)
         }
     }
 
     // Creates an integer type with the given number of bits, e.g. i24
-    fn ix(&self, num_bits: u64) -> &'ll Type {
+    fn type_ix(&self, num_bits: u64) -> &'ll Type {
         unsafe {
             llvm::LLVMIntTypeInContext(self.llcx, num_bits as c_uint)
         }
     }
 
-    fn f32(&self) -> &'ll Type {
+    fn type_f32(&self) -> &'ll Type {
         unsafe {
             llvm::LLVMFloatTypeInContext(self.llcx)
         }
     }
 
-    fn f64(&self) -> &'ll Type {
+    fn type_f64(&self) -> &'ll Type {
         unsafe {
             llvm::LLVMDoubleTypeInContext(self.llcx)
         }
     }
 
-    fn x86_mmx(&self) -> &'ll Type {
+    fn type_x86_mmx(&self) -> &'ll Type {
         unsafe {
             llvm::LLVMX86MMXTypeInContext(self.llcx)
         }
     }
 
-    fn func(
+    fn type_func(
         &self,
         args: &[&'ll Type],
         ret: &'ll Type
@@ -129,7 +129,7 @@ fn func(
         }
     }
 
-    fn variadic_func(
+    fn type_variadic_func(
         &self,
         args: &[&'ll Type],
         ret: &'ll Type
@@ -140,7 +140,7 @@ fn variadic_func(
         }
     }
 
-    fn struct_(
+    fn type_struct(
         &self,
         els: &[&'ll Type],
         packed: bool
@@ -152,7 +152,7 @@ fn struct_(
         }
     }
 
-    fn named_struct(&self, name: &str) -> &'ll Type {
+    fn type_named_struct(&self, name: &str) -> &'ll Type {
         let name = SmallCStr::new(name);
         unsafe {
             llvm::LLVMStructCreateNamed(self.llcx, name.as_ptr())
@@ -160,19 +160,19 @@ fn named_struct(&self, name: &str) -> &'ll Type {
     }
 
 
-    fn array(&self, ty: &'ll Type, len: u64) -> &'ll Type {
+    fn type_array(&self, ty: &'ll Type, len: u64) -> &'ll Type {
         unsafe {
             llvm::LLVMRustArrayType(ty, len)
         }
     }
 
-    fn vector(&self, ty: &'ll Type, len: u64) -> &'ll Type {
+    fn type_vector(&self, ty: &'ll Type, len: u64) -> &'ll Type {
         unsafe {
             llvm::LLVMVectorType(ty, len as c_uint)
         }
     }
 
-    fn kind(&self, ty: &'ll Type) -> TypeKind {
+    fn type_kind(&self, ty: &'ll Type) -> TypeKind {
         unsafe {
             llvm::LLVMRustGetTypeKind(ty)
         }
@@ -185,8 +185,8 @@ fn set_struct_body(&self, ty: &'ll Type, els: &[&'ll Type], packed: bool) {
         }
     }
 
-    fn ptr_to(&self, ty: &'ll Type) -> &'ll Type {
-        assert_ne!(self.kind(ty), TypeKind::Function,
+    fn type_ptr_to(&self, ty: &'ll Type) -> &'ll Type {
+        assert_ne!(self.type_kind(ty), TypeKind::Function,
                    "don't call ptr_to on function types, use ptr_to_llvm_type on FnType instead");
         unsafe {
             llvm::LLVMPointerType(ty, 0)
@@ -206,7 +206,7 @@ fn vector_length(&self, ty: &'ll Type) -> usize {
         }
     }
 
-    fn func_params(&self, ty: &'ll Type) -> Vec<&'ll Type> {
+    fn func_params_types(&self, ty: &'ll Type) -> Vec<&'ll Type> {
         unsafe {
             let n_args = llvm::LLVMCountParamTypes(ty) as usize;
             let mut args = Vec::with_capacity(n_args);
@@ -217,7 +217,7 @@ fn func_params(&self, ty: &'ll Type) -> Vec<&'ll Type> {
     }
 
     fn float_width(&self, ty : &'ll Type) -> usize {
-        match self.kind(ty) {
+        match self.type_kind(ty) {
             TypeKind::Float => 32,
             TypeKind::Double => 64,
             TypeKind::X86_FP80 => 80,
@@ -252,96 +252,100 @@ pub fn ix_llcx(
     }
 
     pub fn i8p_llcx(cx : &write::CodegenContext<'ll>, llcx: &'ll llvm::Context) -> &'ll Type {
-        cx.ptr_to(Type::i8_llcx(llcx))
+        cx.type_ptr_to(Type::i8_llcx(llcx))
     }
 }
 
 impl CodegenCx<'ll, 'tcx> {
-    pub fn bool(&self) -> &'ll Type {
-        self.i8()
+    pub fn type_bool(&self) -> &'ll Type {
+        self.type_i8()
     }
 
-    pub fn i8p(&self) -> &'ll Type {
-        self.ptr_to(self.i8())
+    pub fn type_i8p(&self) -> &'ll Type {
+        self.type_ptr_to(self.type_i8())
     }
 
-    pub fn isize(&self) -> &'ll Type {
+    pub fn type_isize(&self) -> &'ll Type {
         self.isize_ty
     }
 
-    pub fn t_int(&self) -> &'ll Type {
+    pub fn type_int(&self) -> &'ll Type {
         match &self.sess().target.target.target_c_int_width[..] {
-            "16" => self.i16(),
-            "32" => self.i32(),
-            "64" => self.i64(),
+            "16" => self.type_i16(),
+            "32" => self.type_i32(),
+            "64" => self.type_i64(),
             width => bug!("Unsupported target_c_int_width: {}", width),
         }
     }
 
-    pub fn int_from_ty(
+    pub fn type_int_from_ty(
         &self,
         t: ast::IntTy
     ) -> &'ll Type {
         match t {
             ast::IntTy::Isize => self.isize_ty,
-            ast::IntTy::I8 => self.i8(),
-            ast::IntTy::I16 => self.i16(),
-            ast::IntTy::I32 => self.i32(),
-            ast::IntTy::I64 => self.i64(),
-            ast::IntTy::I128 => self.i128(),
+            ast::IntTy::I8 => self.type_i8(),
+            ast::IntTy::I16 => self.type_i16(),
+            ast::IntTy::I32 => self.type_i32(),
+            ast::IntTy::I64 => self.type_i64(),
+            ast::IntTy::I128 => self.type_i128(),
         }
     }
 
-    pub fn uint_from_ty(
+    pub fn type_uint_from_ty(
         &self,
         t: ast::UintTy
     ) -> &'ll Type {
         match t {
             ast::UintTy::Usize => self.isize_ty,
-            ast::UintTy::U8 => self.i8(),
-            ast::UintTy::U16 => self.i16(),
-            ast::UintTy::U32 => self.i32(),
-            ast::UintTy::U64 => self.i64(),
-            ast::UintTy::U128 => self.i128(),
+            ast::UintTy::U8 => self.type_i8(),
+            ast::UintTy::U16 => self.type_i16(),
+            ast::UintTy::U32 => self.type_i32(),
+            ast::UintTy::U64 => self.type_i64(),
+            ast::UintTy::U128 => self.type_i128(),
         }
     }
 
-    pub fn float_from_ty(
+    pub fn type_float_from_ty(
         &self,
         t: ast::FloatTy
     ) -> &'ll Type {
         match t {
-            ast::FloatTy::F32 => self.f32(),
-            ast::FloatTy::F64 => self.f64(),
+            ast::FloatTy::F32 => self.type_f32(),
+            ast::FloatTy::F64 => self.type_f64(),
         }
     }
 
-    pub fn from_integer(&self, i: layout::Integer) -> &'ll Type {
+    pub fn type_from_integer(&self, i: layout::Integer) -> &'ll Type {
         use rustc::ty::layout::Integer::*;
         match i {
-            I8 => self.i8(),
-            I16 => self.i16(),
-            I32 => self.i32(),
-            I64 => self.i64(),
-            I128 => self.i128(),
+            I8 => self.type_i8(),
+            I16 => self.type_i16(),
+            I32 => self.type_i32(),
+            I64 => self.type_i64(),
+            I128 => self.type_i128(),
         }
     }
 
     /// Return a LLVM type that has at most the required alignment,
     /// as a conservative approximation for unknown pointee types.
-    pub fn pointee_for_abi_align(&self, align: Align) -> &'ll Type {
+    pub fn type_pointee_for_abi_align(&self, align: Align) -> &'ll Type {
         // FIXME(eddyb) We could find a better approximation if ity.align < align.
         let ity = layout::Integer::approximate_abi_align(self, align);
-        self.from_integer(ity)
+        self.type_from_integer(ity)
     }
 
     /// Return a LLVM type that has at most the required alignment,
     /// and exactly the required size, as a best-effort padding array.
-    pub fn padding_filler(&self, size: Size, align: Align) -> &'ll Type {
+    pub fn type_padding_filler(
+        &self,
+        size: Size,
+        align: Align
+    ) -> &'ll Type {
         let unit = layout::Integer::approximate_abi_align(self, align);
         let size = size.bytes();
         let unit_size = unit.size().bytes();
         assert_eq!(size % unit_size, 0);
-        self.array(self.from_integer(unit), size / unit_size)
+        self.type_array(self.type_from_integer(unit), size / unit_size)
     }
 }
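
The inherent CodegenCx helpers above keep their behavior and only gain the type_ prefix. type_padding_filler in particular stays a two-step calculation: approximate an integer unit from the required alignment, then emit size / unit_size of that unit as an array. A minimal sketch of the arithmetic, assuming each integer's size equals its alignment (true for i8 through i64 on the usual targets):

// Hypothetical sketch of the type_padding_filler arithmetic, in bytes.
// Returns (element bit width, array length), i.e. the shape of [N x iM].
fn padding_filler(size: u64, align: u64) -> (u64, u64) {
    // Stand-in for layout::Integer::approximate_abi_align: the widest unit
    // (capped at 8 bytes here) that does not exceed the requested alignment.
    let unit_size = [8u64, 4, 2, 1].iter().copied().find(|&u| u <= align).unwrap_or(1);
    assert_eq!(size % unit_size, 0, "filler must be a whole number of units");
    (unit_size * 8, size / unit_size)
}

fn main() {
    assert_eq!(padding_filler(24, 8), (64, 3)); // 24 bytes at align 8 -> [3 x i64]
    assert_eq!(padding_filler(6, 2), (16, 3));  // 6 bytes at align 2  -> [3 x i16]
}
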
index 4cc3d216abac0b2c1cdad1fef775fe72b6522ada..5f961cf125f3af091abb4eb533417677c3e85611 100644 (file)
@@ -38,14 +38,14 @@ fn uncached_llvm_type<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>,
                 (cx.sess().target.target.arch == "x86" ||
                  cx.sess().target.target.arch == "x86_64");
             if use_x86_mmx {
-                return cx.x86_mmx()
+                return cx.type_x86_mmx()
             } else {
                 let element = layout.scalar_llvm_type_at(cx, element, Size::ZERO);
-                return cx.vector(element, count);
+                return cx.type_vector(element, count);
             }
         }
         layout::Abi::ScalarPair(..) => {
-            return cx.struct_( &[
+            return cx.type_struct( &[
                 layout.scalar_pair_element_llvm_type(cx, 0, false),
                 layout.scalar_pair_element_llvm_type(cx, 1, false),
             ], false);
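
The ScalarPair arm is mechanical: the two scalar components become the two fields of an unpacked LLVM struct via type_struct(.., false), roughly the shape a slice reference's pointer-plus-length pair ends up with. A toy illustration of the textual type this builds (strings only, not the compiler's data structures):

// Hypothetical sketch: render the struct the ScalarPair arm would build.
fn scalar_pair_llvm_type(elem0: &str, elem1: &str) -> String {
    // packed = false in the call above, so the unpacked "{ a, b }" spelling.
    format!("{{ {}, {} }}", elem0, elem1)
}

fn main() {
    // e.g. a data pointer plus a 64-bit length
    assert_eq!(scalar_pair_llvm_type("i8*", "i64"), "{ i8*, i64 }");
}
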
@@ -80,30 +80,30 @@ fn uncached_llvm_type<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>,
 
     match layout.fields {
         layout::FieldPlacement::Union(_) => {
-            let fill = cx.padding_filler( layout.size, layout.align);
+            let fill = cx.type_padding_filler( layout.size, layout.align);
             let packed = false;
             match name {
                 None => {
-                    cx.struct_( &[fill], packed)
+                    cx.type_struct( &[fill], packed)
                 }
                 Some(ref name) => {
-                    let llty = cx.named_struct( name);
+                    let llty = cx.type_named_struct( name);
                     cx.set_struct_body(llty, &[fill], packed);
                     llty
                 }
             }
         }
         layout::FieldPlacement::Array { count, .. } => {
-            cx.array(layout.field(cx, 0).llvm_type(cx), count)
+            cx.type_array(layout.field(cx, 0).llvm_type(cx), count)
         }
         layout::FieldPlacement::Arbitrary { .. } => {
             match name {
                 None => {
                     let (llfields, packed) = struct_llfields(cx, layout);
-                    cx.struct_( &llfields, packed)
+                    cx.type_struct( &llfields, packed)
                 }
                 Some(ref name) => {
-                    let llty = cx.named_struct( name);
+                    let llty = cx.type_named_struct( name);
                     *defer = Some((llty, layout));
                     llty
                 }
@@ -137,7 +137,7 @@ fn struct_llfields<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>,
         let padding = target_offset - offset;
         let padding_align = prev_effective_align.min(effective_field_align);
         assert_eq!(offset.abi_align(padding_align) + padding, target_offset);
-        result.push(cx.padding_filler( padding, padding_align));
+        result.push(cx.type_padding_filler( padding, padding_align));
         debug!("    padding before: {:?}", padding);
 
         result.push(field.llvm_type(cx));
@@ -154,7 +154,7 @@ fn struct_llfields<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>,
         assert_eq!(offset.abi_align(padding_align) + padding, layout.size);
         debug!("struct_llfields: pad_bytes: {:?} offset: {:?} stride: {:?}",
                padding, offset, layout.size);
-        result.push(cx.padding_filler(padding, padding_align));
+        result.push(cx.type_padding_filler(padding, padding_align));
         assert_eq!(result.len(), 1 + field_count * 2);
     } else {
         debug!("struct_llfields: offset: {:?} stride: {:?}",
@@ -256,10 +256,10 @@ fn llvm_type<'a>(&self, cx: &CodegenCx<'a, 'tcx>) -> &'a Type {
             let llty = match self.ty.sty {
                 ty::Ref(_, ty, _) |
                 ty::RawPtr(ty::TypeAndMut { ty, .. }) => {
-                    cx.ptr_to(cx.layout_of(ty).llvm_type(cx))
+                    cx.type_ptr_to(cx.layout_of(ty).llvm_type(cx))
                 }
                 ty::Adt(def, _) if def.is_box() => {
-                    cx.ptr_to(cx.layout_of(self.ty.boxed_ty()).llvm_type(cx))
+                    cx.type_ptr_to(cx.layout_of(self.ty.boxed_ty()).llvm_type(cx))
                 }
                 ty::FnPtr(sig) => {
                     let sig = cx.tcx.normalize_erasing_late_bound_regions(
@@ -317,7 +317,7 @@ fn llvm_type<'a>(&self, cx: &CodegenCx<'a, 'tcx>) -> &'a Type {
     fn immediate_llvm_type<'a>(&self, cx: &CodegenCx<'a, 'tcx>) -> &'a Type {
         if let layout::Abi::Scalar(ref scalar) = self.abi {
             if scalar.is_bool() {
-                return cx.i1();
+                return cx.type_i1();
             }
         }
         self.llvm_type(cx)
@@ -326,17 +326,17 @@ fn immediate_llvm_type<'a>(&self, cx: &CodegenCx<'a, 'tcx>) -> &'a Type {
     fn scalar_llvm_type_at<'a>(&self, cx: &CodegenCx<'a, 'tcx>,
                                scalar: &layout::Scalar, offset: Size) -> &'a Type {
         match scalar.value {
-            layout::Int(i, _) => cx.from_integer( i),
-            layout::Float(FloatTy::F32) => cx.f32(),
-            layout::Float(FloatTy::F64) => cx.f64(),
+            layout::Int(i, _) => cx.type_from_integer( i),
+            layout::Float(FloatTy::F32) => cx.type_f32(),
+            layout::Float(FloatTy::F64) => cx.type_f64(),
             layout::Pointer => {
                 // If we know the alignment, pick something better than i8.
                 let pointee = if let Some(pointee) = self.pointee_info_at(cx, offset) {
-                    cx.pointee_for_abi_align( pointee.align)
+                    cx.type_pointee_for_abi_align( pointee.align)
                 } else {
-                    cx.i8()
+                    cx.type_i8()
                 };
-                cx.ptr_to(pointee)
+                cx.type_ptr_to(pointee)
             }
         }
     }
@@ -370,7 +370,7 @@ fn scalar_pair_element_llvm_type<'a>(&self, cx: &CodegenCx<'a, 'tcx>,
         // when immediate.  We need to load/store `bool` as `i8` to avoid
         // crippling LLVM optimizations or triggering other LLVM bugs with `i1`.
         if immediate && scalar.is_bool() {
-            return cx.i1();
+            return cx.type_i1();
         }
 
         let offset = if index == 0 {
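
As the comment in this last hunk says, the bool special case is about the memory/immediate split: in memory a bool occupies an i8, but as an SSA immediate the backend uses i1, so loads compare against zero and stores zero-extend. A standalone sketch of that round trip, outside the compiler (the names mirror, but are not, the codegen helpers):

// Hypothetical sketch of the i8-in-memory / i1-as-immediate convention.
fn to_immediate(stored: u8) -> bool {
    stored != 0 // load side: i8 -> i1, an "icmp ne 0"
}

fn from_immediate(b: bool) -> u8 {
    b as u8     // store side: i1 -> i8, a zero-extension
}

fn main() {
    let byte = from_immediate(true);
    assert_eq!(byte, 1);
    assert!(to_immediate(byte));
}
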