impl LlvmType for Reg {
fn llvm_type(&self, cx: &CodegenCx<'ll, '_>) -> &'ll Type {
match self.kind {
- RegKind::Integer => cx.ix(self.size.bits()),
+ RegKind::Integer => cx.type_ix(self.size.bits()),
RegKind::Float => {
match self.size.bits() {
- 32 => cx.f32(),
- 64 => cx.f64(),
+ 32 => cx.type_f32(),
+ 64 => cx.type_f64(),
_ => bug!("unsupported float: {:?}", self)
}
}
RegKind::Vector => {
- cx.vector(cx.i8(), self.size.bytes())
+ cx.type_vector(cx.type_i8(), self.size.bytes())
}
}
}
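+ // Illustrative: with the renamed helpers, a 24-bit integer register
+ // lowers to `cx.type_ix(24)` (LLVM `i24`), a 64-bit float register to
+ // `cx.type_f64()`, and a 16-byte vector register to `<16 x i8>`.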
// Simplify to array when all chunks are the same size and type
if rem_bytes == 0 {
- return cx.array(rest_ll_unit, rest_count);
+ return cx.type_array(rest_ll_unit, rest_count);
}
}
if rem_bytes != 0 {
// Only integers can be really split further.
assert_eq!(self.rest.unit.kind, RegKind::Integer);
- args.push(cx.ix(rem_bytes * 8));
+ args.push(cx.type_ix(rem_bytes * 8));
}
- cx.struct_(&args, false)
+ cx.type_struct(&args, false)
}
}
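+ // Illustrative, assuming no prefix chunks: two i64 rest chunks with
+ // rem_bytes == 0 collapse to `cx.type_array(cx.type_i64(), 2)`, i.e.
+ // `[2 x i64]`; with rem_bytes == 4 the tail is `cx.type_ix(32)` and the
+ // result is the struct `{ i64, i64, i32 }` instead.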
// uses it for i16 -> {i8, i8}, but not for i24 -> {i8, i8, i8}.
let can_store_through_cast_ptr = false;
if can_store_through_cast_ptr {
- let cast_dst = bx.pointercast(dst.llval, cx.ptr_to(cast.llvm_type(cx)));
+ let cast_dst = bx.pointercast(dst.llval, cx.type_ptr_to(cast.llvm_type(cx)));
bx.store(val, cast_dst, self.layout.align);
} else {
// The actual return type is a struct, but the ABI
// ...and then memcpy it to the intended destination.
base::call_memcpy(bx,
- bx.pointercast(dst.llval, cx.i8p()),
+ bx.pointercast(dst.llval, cx.type_i8p()),
self.layout.align,
- bx.pointercast(llscratch, cx.i8p()),
+ bx.pointercast(llscratch, cx.type_i8p()),
scratch_align,
cx.const_usize(self.layout.size.bytes()),
MemFlags::empty());
let llreturn_ty = match self.ret.mode {
- PassMode::Ignore => cx.void(),
+ PassMode::Ignore => cx.type_void(),
PassMode::Direct(_) | PassMode::Pair(..) => {
self.ret.layout.immediate_llvm_type(cx)
}
PassMode::Cast(cast) => cast.llvm_type(cx),
PassMode::Indirect(..) => {
- llargument_tys.push(cx.ptr_to(self.ret.memory_ty(cx)));
- cx.void()
+ llargument_tys.push(cx.type_ptr_to(self.ret.memory_ty(cx)));
+ cx.type_void()
}
};
continue;
}
PassMode::Cast(cast) => cast.llvm_type(cx),
- PassMode::Indirect(_, None) => cx.ptr_to(arg.memory_ty(cx)),
+ PassMode::Indirect(_, None) => cx.type_ptr_to(arg.memory_ty(cx)),
};
llargument_tys.push(llarg_ty);
}
if self.variadic {
- cx.variadic_func(&llargument_tys, llreturn_ty)
+ cx.type_variadic_func(&llargument_tys, llreturn_ty)
} else {
- cx.func(&llargument_tys, llreturn_ty)
+ cx.type_func(&llargument_tys, llreturn_ty)
}
}
// Depending on how many outputs we have, the return type is different
let num_outputs = output_types.len();
let output_type = match num_outputs {
- 0 => bx.cx().void(),
+ 0 => bx.cx().type_void(),
1 => output_types[0],
- _ => bx.cx().struct_(&output_types, false)
+ _ => bx.cx().type_struct(&output_types, false)
};
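+ // Illustrative: zero output operands keep the `void` return; a single
+ // output returns its own type; two or more are bundled into an anonymous
+ // struct such as `{ i32, i64 }` and unpacked after the call (assumed to
+ // happen via extract_value, as with other aggregate returns).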
let asm = CString::new(ia.asm.as_str().as_bytes()).unwrap();
use rustc::session::Session;
use rustc::util::nodemap::FxHashMap;
use time_graph::{self, TimeGraph, Timeline};
-use llvm::{self, DiagnosticInfo, PassManager, SMDiagnostic, BasicBlock};
+use llvm::{self, DiagnosticInfo, PassManager, SMDiagnostic};
use llvm_util;
use {CodegenResults, ModuleCodegen, CompiledModule, ModuleKind, // ModuleLlvm,
CachedModuleCodegen};
use syntax_pos::symbol::Symbol;
use type_::Type;
use context::{is_pie_binary, get_reloc_model};
-use interfaces::{Backend, CommonWriteMethods};
use common;
use jobserver::{Client, Acquired};
use rustc_demangle;
}
}
-impl<'ll> Backend for CodegenContext<'ll> {
- type Value = &'ll Value;
- type BasicBlock = &'ll BasicBlock;
- type Type = &'ll Type;
- type Context = &'ll llvm::Context;
- type TypeKind = llvm::TypeKind;
-}
-impl CommonWriteMethods for CodegenContext<'ll> {
+impl CodegenContext<'ll> {
fn val_ty(&self, v: &'ll Value) -> &'ll Type {
common::val_ty(v)
}
common::const_bytes_in_context(llcx, bytes)
}
- fn const_struct_in_context(
- &self,
- llcx: &'a llvm::Context,
- elts: &[&'a Value],
- packed: bool,
- ) -> &'a Value {
- common::const_struct_in_context(llcx, elts, packed)
- }
-}
-
-impl CodegenContext<'ll> {
- pub fn ptr_to(&self, ty: &'ll Type) -> &'ll Type {
+ pub fn type_ptr_to(&self, ty: &'ll Type) -> &'ll Type {
unsafe {
llvm::LLVMPointerType(ty, 0)
}
(&ty::RawPtr(ty::TypeAndMut { ty: a, .. }),
&ty::RawPtr(ty::TypeAndMut { ty: b, .. })) => {
assert!(bx.cx().type_is_sized(a));
- let ptr_ty = bx.cx().ptr_to(bx.cx().layout_of(b).llvm_type(bx.cx()));
+ let ptr_ty = bx.cx().type_ptr_to(bx.cx().layout_of(b).llvm_type(bx.cx()));
(bx.pointercast(src, ptr_ty), unsized_info(bx.cx(), a, b, None))
}
(&ty::Adt(def_a, _), &ty::Adt(def_b, _)) if def_a.is_box() && def_b.is_box() => {
let (a, b) = (src_ty.boxed_ty(), dst_ty.boxed_ty());
assert!(bx.cx().type_is_sized(a));
- let ptr_ty = bx.cx().ptr_to(bx.cx().layout_of(b).llvm_type(bx.cx()));
+ let ptr_ty = bx.cx().type_ptr_to(bx.cx().layout_of(b).llvm_type(bx.cx()));
(bx.pointercast(src, ptr_ty), unsized_info(bx.cx(), a, b, None))
}
(&ty::Adt(def_a, _), &ty::Adt(def_b, _)) => {
if op.is_shift() {
let mut rhs_llty = bx.cx().val_ty(rhs);
let mut lhs_llty = bx.cx().val_ty(lhs);
- if bx.cx().kind(rhs_llty) == TypeKind::Vector {
+ if bx.cx().type_kind(rhs_llty) == TypeKind::Vector {
rhs_llty = bx.cx().element_type(rhs_llty)
}
- if bx.cx().kind(lhs_llty) == TypeKind::Vector {
+ if bx.cx().type_kind(lhs_llty) == TypeKind::Vector {
lhs_llty = bx.cx().element_type(lhs_llty)
}
let rhs_sz = bx.cx().int_width(rhs_llty);
bx: &Builder<'_ ,'ll, '_, &'ll Value>,
val: &'ll Value
) -> &'ll Value {
- if bx.cx().val_ty(val) == bx.cx().i1() {
- bx.zext(val, bx.cx().i8())
+ if bx.cx().val_ty(val) == bx.cx().type_i1() {
+ bx.zext(val, bx.cx().type_i8())
} else {
val
}
scalar: &layout::Scalar,
) -> &'ll Value {
if scalar.is_bool() {
- return bx.trunc(val, bx.cx().i1());
+ return bx.trunc(val, bx.cx().type_i1());
}
val
}
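+ // Illustrative round trip: a `bool` immediate is widened with
+ // `zext i1 -> i8` before a store and narrowed back with `trunc i8 -> i1`
+ // after a load, so memory always holds an `i8`.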
if flags.contains(MemFlags::NONTEMPORAL) {
// HACK(nox): This is inefficient but there is no nontemporal memcpy.
let val = bx.load(src, src_align);
- let ptr = bx.pointercast(dst, bx.cx().ptr_to(bx.cx().val_ty(val)));
+ let ptr = bx.pointercast(dst, bx.cx().type_ptr_to(bx.cx().val_ty(val)));
bx.store_with_flags(val, ptr, dst_align, flags);
return;
}
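+ // Illustrative: the nontemporal fallback replaces the memcpy with a plain
+ // `load` plus a `store` carrying the nontemporal flag, preserving the
+ // hint at the cost of a round trip through a register.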
let cx = bx.cx();
- let src_ptr = bx.pointercast(src, cx.i8p());
- let dst_ptr = bx.pointercast(dst, cx.i8p());
+ let src_ptr = bx.pointercast(src, cx.type_i8p());
+ let dst_ptr = bx.pointercast(dst, cx.type_i8p());
let size = bx.intcast(n_bytes, cx.isize_ty, false);
let volatile = flags.contains(MemFlags::VOLATILE);
bx.memcpy(dst_ptr, dst_align.abi(), src_ptr, src_align.abi(), size, volatile);
use_start_lang_item: bool,
) {
let llfty =
- cx.func(&[cx.t_int(), cx.ptr_to(cx.i8p())], cx.t_int());
+ cx.type_func(&[cx.type_int(), cx.type_ptr_to(cx.type_i8p())], cx.type_int());
let main_ret_ty = cx.tcx.fn_sig(rust_main_def_id).output();
// Given that `main()` has no arguments,
start_def_id,
cx.tcx.intern_substs(&[main_ret_ty.into()]),
);
- (start_fn, vec![bx.pointercast(rust_main, cx.ptr_to(cx.i8p())),
+ (start_fn, vec![bx.pointercast(rust_main, cx.type_ptr_to(cx.type_i8p())),
arg_argc, arg_argv])
} else {
debug!("using user-defined start fn");
};
let result = bx.call(start_fn, &args, None);
- bx.ret(bx.intcast(result, cx.t_int(), true));
+ bx.ret(bx.intcast(result, cx.type_int(), true));
}
}
if !cx.used_statics.borrow().is_empty() {
let name = const_cstr!("llvm.used");
let section = const_cstr!("llvm.metadata");
- let array = cx.const_array(&cx.ptr_to(cx.i8()), &*cx.used_statics.borrow());
+ let array = cx.const_array(
+ &cx.type_ptr_to(cx.type_i8()),
+ &*cx.used_statics.borrow()
+ );
unsafe {
let g = llvm::LLVMAddGlobal(cx.llmod,
}).collect::<Vec<_>>();
debug!("Asm Output Type: {:?}", output);
- let fty = &self.cx().func(&argtys[..], output);
+ let fty = &self.cx().type_func(&argtys[..], output);
unsafe {
// Ask LLVM to verify that the constraints are well-formed.
let constraints_ok = llvm::LLVMRustInlineAsmVerify(fty, cons);
fn vector_splat(&self, num_elts: usize, elt: &'ll Value) -> &'ll Value {
unsafe {
let elt_ty = self.cx.val_ty(elt);
- let undef = llvm::LLVMGetUndef(&self.cx().vector(elt_ty, num_elts as u64));
+ let undef = llvm::LLVMGetUndef(&self.cx().type_vector(elt_ty, num_elts as u64));
let vec = self.insert_element(undef, elt, self.cx.const_i32(0));
- let vec_i32_ty = &self.cx().vector(&self.cx().i32(), num_elts as u64);
+ let vec_i32_ty = &self.cx().type_vector(&self.cx().type_i32(), num_elts as u64);
self.shuffle_vector(vec, undef, self.cx().const_null(vec_i32_ty))
}
}
ptr: &'ll Value) -> &'ll Value {
let dest_ptr_ty = self.cx.val_ty(ptr);
let stored_ty = self.cx.val_ty(val);
- let stored_ptr_ty = self.cx.ptr_to(stored_ty);
+ let stored_ptr_ty = self.cx.type_ptr_to(stored_ty);
- assert_eq!(self.cx.kind(dest_ptr_ty), llvm::TypeKind::Pointer);
+ assert_eq!(self.cx.type_kind(dest_ptr_ty), llvm::TypeKind::Pointer);
if dest_ptr_ty == stored_ptr_ty {
ptr
args: &'b [&'ll Value]) -> Cow<'b, [&'ll Value]> {
let mut fn_ty = self.cx.val_ty(llfn);
// Strip off pointers
- while self.cx.kind(fn_ty) == llvm::TypeKind::Pointer {
+ while self.cx.type_kind(fn_ty) == llvm::TypeKind::Pointer {
fn_ty = self.cx.element_type(fn_ty);
}
- assert!(self.cx.kind(fn_ty) == llvm::TypeKind::Function,
+ assert!(self.cx.type_kind(fn_ty) == llvm::TypeKind::Function,
"builder::{} not passed a function, but {:?}", typ, fn_ty);
- let param_tys = self.cx.func_params(fn_ty);
+ let param_tys = self.cx.func_params_types(fn_ty);
let all_args_match = param_tys.iter()
.zip(args.iter().map(|&v| self.cx().val_ty(v)))
let lifetime_intrinsic = self.cx.get_intrinsic(intrinsic);
- let ptr = self.pointercast(ptr, self.cx.i8p());
+ let ptr = self.pointercast(ptr, self.cx.type_i8p());
self.call(lifetime_intrinsic, &[self.cx.const_u64(size), ptr], None);
}
}
fn const_bool(&self, val: bool) -> &'ll Value {
- &self.const_uint(&self.i1(), val as u64)
+ &self.const_uint(&self.type_i1(), val as u64)
}
fn const_i32(&self, i: i32) -> &'ll Value {
- &self.const_int(&self.i32(), i as i64)
+ &self.const_int(&self.type_i32(), i as i64)
}
fn const_u32(&self, i: u32) -> &'ll Value {
- &self.const_uint(&self.i32(), i as u64)
+ &self.const_uint(&self.type_i32(), i as u64)
}
fn const_u64(&self, i: u64) -> &'ll Value {
- &self.const_uint(&self.i64(), i)
+ &self.const_uint(&self.type_i64(), i)
}
fn const_usize(&self, i: u64) -> &'ll Value {
}
fn const_u8(&self, i: u8) -> &'ll Value {
- &self.const_uint(&self.i8(), i as u64)
+ &self.const_uint(&self.type_i8(), i as u64)
}
fn const_str_slice(&self, s: LocalInternedString) -> &'ll Value {
let len = s.len();
let cs = consts::ptrcast(&self.const_cstr(s, false),
- &self.ptr_to(&self.layout_of(&self.tcx.mk_str()).llvm_type(&self)));
+ &self.type_ptr_to(&self.layout_of(&self.tcx.mk_str()).llvm_type(&self)));
&self.const_fat_ptr(cs, &self.const_usize(len as u64))
}
mask_llty: &'ll Type,
invert: bool
) -> &'ll Value {
- let kind = bx.cx().kind(llty);
+ let kind = bx.cx().type_kind(llty);
match kind {
TypeKind::Integer => {
// i8/u8 can shift by at most 7, i16/u16 by at most 15, etc.
// boolean SSA values are i1, but they have to be stored in i8 slots,
// otherwise some LLVM optimization passes don't work as expected
let mut val_llty = cx.val_ty(v);
- let v = if val_llty == cx.i1() {
- val_llty = cx.i8();
+ let v = if val_llty == cx.type_i1() {
+ val_llty = cx.type_i8();
llvm::LLVMConstZExt(v, val_llty)
} else {
v
if attrs.flags.contains(CodegenFnAttrFlags::USED) {
// This static will be stored in the llvm.used variable which is an array of i8*
- let cast = llvm::LLVMConstPointerCast(g, cx.i8p());
+ let cast = llvm::LLVMConstPointerCast(g, cx.type_i8p());
cx.used_statics.borrow_mut().push(cast);
}
}
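+ // Illustrative: each `#[used]` static is cast to `i8*` and collected in
+ // `cx.used_statics`, which the earlier hunk emits as the `llvm.used`
+ // global so the optimizer cannot discard those statics.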
} else {
"rust_eh_personality"
};
- let fty = &self.variadic_func(&[], &self.i32());
+ let fty = &self.type_variadic_func(&[], &self.type_i32());
declare::declare_cfn(self, name, fty)
}
};
macro_rules! ifn {
($name:expr, fn() -> $ret:expr) => (
if key == $name {
- let f = declare::declare_cfn(cx, $name, cx.func(&[], $ret));
+ let f = declare::declare_cfn(cx, $name, cx.type_func(&[], $ret));
llvm::SetUnnamedAddr(f, false);
cx.intrinsics.borrow_mut().insert($name, f.clone());
return Some(f);
);
($name:expr, fn(...) -> $ret:expr) => (
if key == $name {
- let f = declare::declare_cfn(cx, $name, cx.variadic_func(&[], $ret));
+ let f = declare::declare_cfn(cx, $name, cx.type_variadic_func(&[], $ret));
llvm::SetUnnamedAddr(f, false);
cx.intrinsics.borrow_mut().insert($name, f.clone());
return Some(f);
);
($name:expr, fn($($arg:expr),*) -> $ret:expr) => (
if key == $name {
- let f = declare::declare_cfn(cx, $name, cx.func(&[$($arg),*], $ret));
+ let f = declare::declare_cfn(cx, $name, cx.type_func(&[$($arg),*], $ret));
llvm::SetUnnamedAddr(f, false);
cx.intrinsics.borrow_mut().insert($name, f.clone());
return Some(f);
);
}
macro_rules! mk_struct {
- ($($field_ty:expr),*) => (cx.struct_( &[$($field_ty),*], false))
+ ($($field_ty:expr),*) => (cx.type_struct(&[$($field_ty),*], false))
}
- let i8p = cx.i8p();
- let void = cx.void();
- let i1 = cx.i1();
- let t_i8 = cx.i8();
- let t_i16 = cx.i16();
- let t_i32 = cx.i32();
- let t_i64 = cx.i64();
- let t_i128 = cx.i128();
- let t_f32 = cx.f32();
- let t_f64 = cx.f64();
-
- let t_v2f32 = cx.vector(t_f32, 2);
- let t_v4f32 = cx.vector(t_f32, 4);
- let t_v8f32 = cx.vector(t_f32, 8);
- let t_v16f32 = cx.vector(t_f32, 16);
-
- let t_v2f64 = cx.vector(t_f64, 2);
- let t_v4f64 = cx.vector(t_f64, 4);
- let t_v8f64 = cx.vector(t_f64, 8);
+ let i8p = cx.type_i8p();
+ let void = cx.type_void();
+ let i1 = cx.type_i1();
+ let t_i8 = cx.type_i8();
+ let t_i16 = cx.type_i16();
+ let t_i32 = cx.type_i32();
+ let t_i64 = cx.type_i64();
+ let t_i128 = cx.type_i128();
+ let t_f32 = cx.type_f32();
+ let t_f64 = cx.type_f64();
+
+ let t_v2f32 = cx.type_vector(t_f32, 2);
+ let t_v4f32 = cx.type_vector(t_f32, 4);
+ let t_v8f32 = cx.type_vector(t_f32, 8);
+ let t_v16f32 = cx.type_vector(t_f32, 16);
+
+ let t_v2f64 = cx.type_vector(t_f64, 2);
+ let t_v4f64 = cx.type_vector(t_f64, 4);
+ let t_v8f64 = cx.type_vector(t_f64, 8);
ifn!("llvm.memset.p0i8.i16", fn(i8p, t_i8, t_i16, t_i32, i1) -> void);
ifn!("llvm.memset.p0i8.i32", fn(i8p, t_i8, t_i32, t_i32, i1) -> void);
ifn!("llvm.prefetch", fn(i8p, t_i32, t_i32, t_i32) -> void);
if cx.sess().opts.debuginfo != DebugInfo::None {
- ifn!("llvm.dbg.declare", fn(cx.metadata(), cx.metadata()) -> void);
- ifn!("llvm.dbg.value", fn(cx.metadata(), t_i64, cx.metadata()) -> void);
+ ifn!("llvm.dbg.declare", fn(cx.type_metadata(), cx.type_metadata()) -> void);
+ ifn!("llvm.dbg.value", fn(cx.type_metadata(), t_i64, cx.type_metadata()) -> void);
}
None
let section_contents = b"\x01gdb_load_rust_pretty_printers.py\0";
unsafe {
- let llvm_type = cx.array(cx.i8(),
+ let llvm_type = cx.type_array(cx.type_i8(),
section_contents.len() as u64);
let section_var = declare::define_global(cx, section_var_name,
use super::backend::Backend;
pub trait TypeMethods : Backend {
- fn void(&self) -> Self::Type;
- fn metadata(&self) -> Self::Type;
- fn i1(&self) -> Self::Type;
- fn i8(&self) -> Self::Type;
- fn i16(&self) -> Self::Type;
- fn i32(&self) -> Self::Type;
- fn i64(&self) -> Self::Type;
- fn i128(&self) -> Self::Type;
- fn ix(&self, num_bites: u64) -> Self::Type;
- fn f32(&self) -> Self::Type;
- fn f64(&self) -> Self::Type;
- fn x86_mmx(&self) -> Self::Type;
+ fn type_void(&self) -> Self::Type;
+ fn type_metadata(&self) -> Self::Type;
+ fn type_i1(&self) -> Self::Type;
+ fn type_i8(&self) -> Self::Type;
+ fn type_i16(&self) -> Self::Type;
+ fn type_i32(&self) -> Self::Type;
+ fn type_i64(&self) -> Self::Type;
+ fn type_i128(&self) -> Self::Type;
+ fn type_ix(&self, num_bits: u64) -> Self::Type;
+ fn type_f32(&self) -> Self::Type;
+ fn type_f64(&self) -> Self::Type;
+ fn type_x86_mmx(&self) -> Self::Type;
- fn func(&self, args: &[Self::Type], ret: Self::Type) -> Self::Type;
- fn variadic_func(&self, args: &[Self::Type], ret: Self::Type) -> Self::Type;
- fn struct_(&self, els: &[Self::Type], packed: bool) -> Self::Type;
- fn named_struct(&self, name: &str) -> Self::Type;
- fn array(&self, ty: Self::Type, len: u64) -> Self::Type;
- fn vector(&self, ty: Self::Type, len: u64) -> Self::Type;
- fn kind(&self, ty: Self::Type) -> Self::TypeKind;
+ fn type_func(&self, args: &[Self::Type], ret: Self::Type) -> Self::Type;
+ fn type_variadic_func(&self, args: &[Self::Type], ret: Self::Type) -> Self::Type;
+ fn type_struct(&self, els: &[Self::Type], packed: bool) -> Self::Type;
+ fn type_named_struct(&self, name: &str) -> Self::Type;
+ fn type_array(&self, ty: Self::Type, len: u64) -> Self::Type;
+ fn type_vector(&self, ty: Self::Type, len: u64) -> Self::Type;
+ fn type_kind(&self, ty: Self::Type) -> Self::TypeKind;
fn set_struct_body(&self, ty: Self::Type, els: &[Self::Type], packed: bool);
- fn ptr_to(&self, ty: Self::Type) -> Self::Type;
+ fn type_ptr_to(&self, ty: Self::Type) -> Self::Type;
fn element_type(&self, ty: Self::Type) -> Self::Type;
fn vector_length(&self, ty: Self::Type) -> usize;
- fn func_params(&self, ty: Self::Type) -> Vec<Self::Type>;
+ fn func_params_types(&self, ty: Self::Type) -> Vec<Self::Type>;
fn float_width(&self, ty: Self::Type) -> usize;
fn int_width(&self, ty: Self::Type) -> u64;
}
let tp_ty = substs.type_at(0);
let mut ptr = args[0].immediate();
if let PassMode::Cast(ty) = fn_ty.ret.mode {
- ptr = bx.pointercast(ptr, bx.cx().ptr_to(ty.llvm_type(cx)));
+ ptr = bx.pointercast(ptr, bx.cx().type_ptr_to(ty.llvm_type(cx)));
}
let load = bx.volatile_load(ptr);
let align = if name == "unaligned_volatile_load" {
args[1].immediate()
], None);
let val = bx.extract_value(pair, 0);
- let overflow = bx.zext(bx.extract_value(pair, 1), cx.bool());
+ let overflow = bx.zext(bx.extract_value(pair, 1), cx.type_bool());
let dest = result.project_field(bx, 0);
bx.store(val, dest.llval, dest.align);
} else {
// rotate_left: (X << (S % BW)) | (X >> ((BW - S) % BW))
// rotate_right: (X << ((BW - S) % BW)) | (X >> (S % BW))
- let width = cx.const_uint(cx.ix(width), width);
+ let width = cx.const_uint(cx.type_ix(width), width);
let shift = bx.urem(raw_shift, width);
let inv_shift = bx.urem(bx.sub(width, raw_shift), width);
let shift1 = bx.shl(val, if is_left { shift } else { inv_shift });
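+ // Worked example: for `rotate_left(x, 3)` on a u8, BW = 8, so
+ // shift = 3 % 8 = 3 and inv_shift = (8 - 3) % 8 = 5, giving
+ // `(x << 3) | (x >> 5)`; the `% BW` keeps both shift amounts in range
+ // even when S >= BW.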
failorder,
weak);
let val = bx.extract_value(pair, 0);
- let success = bx.zext(bx.extract_value(pair, 1), bx.cx().bool());
+ let success = bx.zext(bx.extract_value(pair, 1), bx.cx().type_bool());
let dest = result.project_field(bx, 0);
bx.store(val, dest.llval, dest.align);
fn ty_to_type(cx: &CodegenCx<'ll, '_>, t: &intrinsics::Type) -> Vec<&'ll Type> {
use intrinsics::Type::*;
match *t {
- Void => vec![cx.void()],
+ Void => vec![cx.type_void()],
Integer(_signed, _width, llvm_width) => {
- vec![cx.ix( llvm_width as u64)]
+ vec![cx.type_ix(llvm_width as u64)]
}
Float(x) => {
match x {
- 32 => vec![cx.f32()],
- 64 => vec![cx.f64()],
+ 32 => vec![cx.type_f32()],
+ 64 => vec![cx.type_f64()],
_ => bug!()
}
}
Pointer(ref t, ref llvm_elem, _const) => {
let t = llvm_elem.as_ref().unwrap_or(t);
let elem = one(ty_to_type(cx, t));
- vec![cx.ptr_to(elem)]
+ vec![cx.type_ptr_to(elem)]
}
Vector(ref t, ref llvm_elem, length) => {
let t = llvm_elem.as_ref().unwrap_or(t);
let elem = one(ty_to_type(cx, t));
- vec![cx.vector(elem, length as u64)]
+ vec![cx.type_vector(elem, length as u64)]
}
Aggregate(false, ref contents) => {
let elems = contents.iter()
.map(|t| one(ty_to_type(cx, t)))
.collect::<Vec<_>>();
- vec![cx.struct_( &elems, false)]
+ vec![cx.type_struct(&elems, false)]
}
Aggregate(true, ref contents) => {
contents.iter()
}
intrinsics::Type::Pointer(_, Some(ref llvm_elem), _) => {
let llvm_elem = one(ty_to_type(bx.cx(), llvm_elem));
- vec![bx.pointercast(arg.immediate(), bx.cx().ptr_to(llvm_elem))]
+ vec![bx.pointercast(arg.immediate(), bx.cx().type_ptr_to(llvm_elem))]
}
intrinsics::Type::Vector(_, Some(ref llvm_elem), length) => {
let llvm_elem = one(ty_to_type(bx.cx(), llvm_elem));
vec![
bx.bitcast(arg.immediate(),
- bx.cx().vector(llvm_elem, length as u64))
+ bx.cx().type_vector(llvm_elem, length as u64))
]
}
intrinsics::Type::Integer(_, width, llvm_width) if width != llvm_width => {
// the LLVM intrinsic uses a smaller integer
// size than the C intrinsic's signature, so
// we have to trim it down here.
- vec![bx.trunc(arg.immediate(), bx.cx().ix(llvm_width as u64))]
+ vec![bx.trunc(arg.immediate(), bx.cx().type_ix(llvm_width as u64))]
}
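+ // Illustrative: if the portable signature passes a 32-bit value where the
+ // LLVM intrinsic wants an 8-bit operand (llvm_width = 8), the argument is
+ // narrowed with `trunc i32 -> i8` before the call.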
_ => vec![arg.immediate()],
}
intrinsics::IntrinsicDef::Named(name) => {
let f = declare::declare_cfn(cx,
name,
- cx.func(&inputs, outputs));
+ cx.type_func(&inputs, outputs));
bx.call(f, &llargs, None)
}
};
if !fn_ty.ret.is_ignore() {
if let PassMode::Cast(ty) = fn_ty.ret.mode {
- let ptr = bx.pointercast(result.llval, cx.ptr_to(ty.llvm_type(cx)));
+ let ptr = bx.pointercast(result.llval, cx.type_ptr_to(ty.llvm_type(cx)));
bx.store(llval, ptr, result.align);
} else {
OperandRef::from_immediate_or_packed_pair(bx, llval, result.layout)
let (size, align) = cx.size_and_align_of(ty);
let size = cx.const_usize(size.bytes());
let align = align.abi();
- let dst_ptr = bx.pointercast(dst, cx.i8p());
- let src_ptr = bx.pointercast(src, cx.i8p());
+ let dst_ptr = bx.pointercast(dst, cx.type_i8p());
+ let src_ptr = bx.pointercast(src, cx.type_i8p());
if allow_overlap {
bx.memmove(dst_ptr, align, src_ptr, align, bx.mul(size, count), volatile)
} else {
let (size, align) = cx.size_and_align_of(ty);
let size = cx.const_usize(size.bytes());
let align = cx.const_i32(align.abi() as i32);
- let dst = bx.pointercast(dst, cx.i8p());
+ let dst = bx.pointercast(dst, cx.type_i8p());
call_memset(bx, dst, val, bx.mul(size, count), align, volatile)
}
if bx.sess().no_landing_pads() {
bx.call(func, &[data], None);
let ptr_align = bx.tcx().data_layout.pointer_align;
- bx.store(cx.const_null(cx.i8p()), dest, ptr_align);
+ bx.store(cx.const_null(cx.type_i8p()), dest, ptr_align);
} else if wants_msvc_seh(bx.sess()) {
codegen_msvc_try(bx, cx, func, data, local_ptr, dest);
} else {
// }
//
// More information can be found in libstd's seh.rs implementation.
- let i64p = cx.ptr_to(cx.i64());
+ let i64p = cx.type_ptr_to(cx.type_i64());
let ptr_align = bx.tcx().data_layout.pointer_align;
let slot = bx.alloca(i64p, "slot", ptr_align);
bx.invoke(func, &[data], normal.llbb(), catchswitch.llbb(), None);
// being thrown. The second value is a "selector" indicating which of
// the landing pad clauses the exception's type had been matched to.
// rust_try ignores the selector.
- let lpad_ty = cx.struct_(&[cx.i8p(), cx.i32()], false);
+ let lpad_ty = cx.type_struct(&[cx.type_i8p(), cx.type_i32()], false);
let vals = catch.landing_pad(lpad_ty, bx.cx().eh_personality(), 1);
- catch.add_clause(vals, bx.cx().const_null(cx.i8p()));
+ catch.add_clause(vals, bx.cx().const_null(cx.type_i8p()));
let ptr = catch.extract_value(vals, 0);
let ptr_align = bx.tcx().data_layout.pointer_align;
- catch.store(ptr, catch.bitcast(local_ptr, cx.ptr_to(cx.i8p())), ptr_align);
+ catch.store(ptr, catch.bitcast(local_ptr, cx.type_ptr_to(cx.type_i8p())), ptr_align);
catch.ret(cx.const_i32(1));
});
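+ // Illustrative: the landing pad yields an `{ i8*, i32 }` pair (exception
+ // pointer, selector); the null `i8*` clause makes it a catch-all, and only
+ // the pointer half is written through `local_ptr` before returning 1 to
+ // signal that an exception was caught.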
found `{}` with length {}",
in_len, in_ty,
ret_ty, out_len);
- require!(bx.cx().kind(bx.cx().element_type(llret_ty)) == TypeKind::Integer,
+ require!(bx.cx().type_kind(bx.cx().element_type(llret_ty)) == TypeKind::Integer,
"expected return type with integer elements, found `{}` with non-integer `{}`",
ret_ty,
ret_ty.simd_type(tcx));
_ => return_error!("mask element type is `{}`, expected `i_`", m_elem_ty)
}
// truncate the mask to a vector of i1s
- let i1 = bx.cx().i1();
- let i1xn = bx.cx().vector(i1, m_len as u64);
+ let i1 = bx.cx().type_i1();
+ let i1xn = bx.cx().type_vector(i1, m_len as u64);
let m_i1s = bx.trunc(args[0].immediate(), i1xn);
return Ok(bx.select(m_i1s, args[1].immediate(), args[2].immediate()));
}
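+ // Illustrative: an `i32x4` mask is first truncated to `<4 x i1>`, the
+ // form LLVM's `select` expects for a per-lane choice between the inputs.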
mut no_pointers: usize) -> &'ll Type {
// FIXME: use cx.layout_of(ty).llvm_type() ?
let mut elem_ty = match elem_ty.sty {
- ty::Int(v) => cx.int_from_ty( v),
- ty::Uint(v) => cx.uint_from_ty( v),
- ty::Float(v) => cx.float_from_ty( v),
+ ty::Int(v) => cx.type_int_from_ty(v),
+ ty::Uint(v) => cx.type_uint_from_ty(v),
+ ty::Float(v) => cx.type_float_from_ty(v),
_ => unreachable!(),
};
while no_pointers > 0 {
- elem_ty = cx.ptr_to(elem_ty);
+ elem_ty = cx.type_ptr_to(elem_ty);
no_pointers -= 1;
}
- cx.vector(elem_ty, vec_len as u64)
+ cx.type_vector(elem_ty, vec_len as u64)
}
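+ // Illustrative: `elem_ty = i32`, `no_pointers = 1`, `vec_len = 4` yields
+ // `<4 x i32*>`, the pointer-vector shape the masked gather/scatter
+ // intrinsics below operate on.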
}
// Alignment of T, must be a constant integer value:
- let alignment_ty = bx.cx().i32();
+ let alignment_ty = bx.cx().type_i32();
let alignment = bx.cx().const_i32(bx.cx().align_of(in_elem).abi() as i32);
// Truncate the mask vector to a vector of i1s:
let (mask, mask_ty) = {
- let i1 = bx.cx().i1();
- let i1xn = bx.cx().vector(i1, in_len as u64);
+ let i1 = bx.cx().type_i1();
+ let i1xn = bx.cx().type_vector(i1, in_len as u64);
(bx.trunc(args[2].immediate(), i1xn), i1xn)
};
let llvm_intrinsic = format!("llvm.masked.gather.{}.{}",
llvm_elem_vec_str, llvm_pointer_vec_str);
let f = declare::declare_cfn(bx.cx(), &llvm_intrinsic,
- bx.cx().func(&[
+ bx.cx().type_func(&[
llvm_pointer_vec_ty,
alignment_ty,
mask_ty,
}
// Alignment of T, must be a constant integer value:
- let alignment_ty = bx.cx().i32();
+ let alignment_ty = bx.cx().type_i32();
let alignment = bx.cx().const_i32(bx.cx().align_of(in_elem).abi() as i32);
// Truncate the mask vector to a vector of i1s:
let (mask, mask_ty) = {
- let i1 = bx.cx().i1();
- let i1xn = bx.cx().vector(i1, in_len as u64);
+ let i1 = bx.cx().type_i1();
+ let i1xn = bx.cx().type_vector(i1, in_len as u64);
(bx.trunc(args[2].immediate(), i1xn), i1xn)
};
- let ret_t = bx.cx().void();
+ let ret_t = bx.cx().type_void();
// Type of the vector of pointers:
let llvm_pointer_vec_ty = llvm_vector_ty(bx.cx(), underlying_ty, in_len, pointer_count);
let llvm_intrinsic = format!("llvm.masked.scatter.{}.{}",
llvm_elem_vec_str, llvm_pointer_vec_str);
let f = declare::declare_cfn(bx.cx(), &llvm_intrinsic,
- bx.cx().func(&[llvm_elem_vec_ty,
+ bx.cx().type_func(&[llvm_elem_vec_ty,
llvm_pointer_vec_ty,
alignment_ty,
mask_ty], ret_t));
} else {
// unordered arithmetic reductions do not:
match f.bit_width() {
- 32 => bx.cx().const_undef(bx.cx().f32()),
- 64 => bx.cx().const_undef(bx.cx().f64()),
+ 32 => bx.cx().const_undef(bx.cx().type_f32()),
+ 64 => bx.cx().const_undef(bx.cx().type_f64()),
v => {
return_error!(r#"
unsupported {} from `{}` with element `{}` of size `{}` to `{}`"#,
}
// boolean reductions operate on vectors of i1s:
- let i1 = bx.cx().i1();
- let i1xn = bx.cx().vector(i1, in_len as u64);
+ let i1 = bx.cx().type_i1();
+ let i1xn = bx.cx().type_vector(i1, in_len as u64);
bx.trunc(args[0].immediate(), i1xn)
};
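+ // Illustrative: `simd_reduce_all` on an `i8x4` mask truncates to
+ // `<4 x i1>` here, and the scalar `i1` result of the reduction is
+ // widened back to the `i8`-based `type_bool()` in the zext arm below.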
return match in_elem.sty {
if !$boolean {
r
} else {
- bx.zext(r, bx.cx().bool())
+ bx.zext(r, bx.cx().type_bool())
}
)
},
let llvtable = bx.pointercast(
llvtable,
- bx.cx().ptr_to(fn_ty.ptr_to_llvm_type(bx.cx()))
+ bx.cx().type_ptr_to(fn_ty.ptr_to_llvm_type(bx.cx()))
);
let ptr_align = bx.tcx().data_layout.pointer_align;
let ptr = bx.load(
// Load the data pointer from the object.
debug!("get_int({:?}, {:?})", llvtable, self);
- let llvtable = bx.pointercast(llvtable, bx.cx().ptr_to(bx.cx().isize()));
+ let llvtable = bx.pointercast(llvtable, bx.cx().type_ptr_to(bx.cx().type_isize()));
let usize_align = bx.tcx().data_layout.pointer_align;
let ptr = bx.load(
bx.inbounds_gep(llvtable, &[bx.cx().const_usize(self.0)]),
}
// Not in the cache. Build it.
- let nullptr = cx.const_null(cx.i8p());
+ let nullptr = cx.const_null(cx.type_i8p());
let methods = tcx.vtable_methods(trait_ref.with_self_ty(tcx, ty));
let methods = methods.iter().cloned().map(|opt_mth| {
}
};
bx.load(
- bx.pointercast(llslot, bx.cx().ptr_to(cast_ty.llvm_type(bx.cx()))),
+ bx.pointercast(llslot, bx.cx().type_ptr_to(cast_ty.llvm_type(bx.cx()))),
self.fn_ty.ret.layout.align)
}
};
let dest = match ret_dest {
_ if fn_ty.ret.is_indirect() => llargs[0],
ReturnDest::Nothing => {
- bx.cx().const_undef(bx.cx().ptr_to(fn_ty.ret.memory_ty(bx.cx())))
+ bx.cx().const_undef(bx.cx().type_ptr_to(fn_ty.ret.memory_ty(bx.cx())))
}
ReturnDest::IndirectOperand(dst, _) |
ReturnDest::Store(dst) => dst.llval,
if by_ref && !arg.is_indirect() {
// Have to load the argument, maybe while casting it.
if let PassMode::Cast(ty) = arg.mode {
- llval = bx.load(bx.pointercast(llval, bx.cx().ptr_to(ty.llvm_type(bx.cx()))),
+ llval = bx.load(bx.pointercast(llval, bx.cx().type_ptr_to(ty.llvm_type(bx.cx()))),
align.min(arg.layout.align));
} else {
// We can't use `PlaceRef::load` here because the argument
fn landing_pad_type(&self) -> &'ll Type {
let cx = self.cx;
- cx.struct_( &[cx.i8p(), cx.i32()], false)
+ cx.type_struct(&[cx.type_i8p(), cx.type_i32()], false)
}
fn unreachable_block(&mut self) -> &'ll BasicBlock {
dst: PlaceRef<'tcx, &'ll Value>) {
let src = self.codegen_operand(bx, src);
let llty = src.layout.llvm_type(bx.cx());
- let cast_ptr = bx.pointercast(dst.llval, bx.cx().ptr_to(llty));
+ let cast_ptr = bx.pointercast(dst.llval, bx.cx().type_ptr_to(llty));
let align = src.layout.align.min(dst.layout.align);
src.val.store(bx, PlaceRef::new_sized(cast_ptr, src.layout, align));
}
match cv {
Scalar::Bits { size: 0, .. } => {
assert_eq!(0, layout.value.size(cx).bytes());
- cx.const_undef(cx.ix(0))
+ cx.const_undef(cx.type_ix(0))
},
Scalar::Bits { bits, size } => {
assert_eq!(size as u64, layout.value.size(cx).bytes());
- let llval = cx.const_uint_big(cx.ix(bitsize), bits);
+ let llval = cx.const_uint_big(cx.type_ix(bitsize), bits);
if layout.value == layout::Pointer {
unsafe { llvm::LLVMConstIntToPtr(llval, llty) }
} else {
None => bug!("missing allocation {:?}", ptr.alloc_id),
};
let llval = unsafe { llvm::LLVMConstInBoundsGEP(
- consts::bitcast(base_addr, cx.i8p()),
+ consts::bitcast(base_addr, cx.type_i8p()),
&cx.const_usize(ptr.offset.bytes()),
1,
) };
value: layout::Primitive::Pointer,
valid_range: 0..=!0
},
- cx.i8p()
+ cx.type_i8p()
));
next_offset = offset + pointer_size;
}
// C++ personality function, but `catch (...)` has no type so
// it's null. The 64 here is actually a bitfield which
// represents that this is a catch-all block.
- let null = bx.cx().const_null(bx.cx().i8p());
+ let null = bx.cx().const_null(bx.cx().type_i8p());
let sixty_four = bx.cx().const_i32(64);
cleanup = cp_bx.catch_pad(cs, &[null, sixty_four, null]);
cp_bx.br(llbb);
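+ // Illustrative reading: the catchpad operands are a type_info, a flags
+ // bitfield, and a slot; null type_info plus the 64 flag encodes
+ // `catch (...)` as described above (operand meaning assumed from the
+ // surrounding comment, not spelled out in this hunk).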
// Allocate an appropriate region on the stack, and copy the value into it
let (llsize, _) = glue::size_and_align_of_dst(bx, unsized_ty, Some(llextra));
- let lldst = bx.array_alloca(bx.cx().i8(), llsize, "unsized_tmp", max_align);
+ let lldst = bx.array_alloca(bx.cx().type_i8(), llsize, "unsized_tmp", max_align);
base::call_memcpy(bx, lldst, max_align, llptr, min_align, llsize, flags);
// Store the allocated region and the extra to the indirect place.
// We've errored, so we don't have to produce working code.
let layout = bx.cx().layout_of(ty);
PlaceRef::new_sized(
- bx.cx().const_undef(bx.cx().ptr_to(layout.llvm_type(bx.cx()))),
+ bx.cx().const_undef(bx.cx().type_ptr_to(layout.llvm_type(bx.cx()))),
layout,
layout.align,
).load(bx)
let base_addr = consts::addr_of(bx.cx(), init, layout.align, None);
let llval = unsafe { LLVMConstInBoundsGEP(
- consts::bitcast(base_addr, bx.cx().i8p()),
+ consts::bitcast(base_addr, bx.cx().type_i8p()),
&bx.cx().const_usize(offset.bytes()),
1,
)};
- let llval = consts::bitcast(llval, bx.cx().ptr_to(layout.llvm_type(bx.cx())));
+ let llval = consts::bitcast(llval, bx.cx().type_ptr_to(layout.llvm_type(bx.cx())));
PlaceRef::new_sized(llval, layout, alloc.align)
}
let load = bx.load(llptr, self.align);
scalar_load_metadata(load, scalar);
if scalar.is_bool() {
- bx.trunc(load, bx.cx().i1())
+ bx.trunc(load, bx.cx().type_i1())
} else {
load
}
};
PlaceRef {
// HACK(eddyb) have to bitcast pointers until LLVM removes pointee types.
- llval: bx.pointercast(llval, cx.ptr_to(field.llvm_type(cx))),
+ llval: bx.pointercast(llval, cx.type_ptr_to(field.llvm_type(cx))),
llextra: if cx.type_has_metadata(field.ty) {
self.llextra
} else {
debug!("struct_field_ptr: DST field offset: {:?}", offset);
// Cast and adjust pointer
- let byte_ptr = bx.pointercast(self.llval, cx.i8p());
+ let byte_ptr = bx.pointercast(self.llval, cx.type_i8p());
let byte_ptr = bx.gep(byte_ptr, &[offset]);
// Finally, cast back to the type expected
debug!("struct_field_ptr: Field type is {:?}", ll_fty);
PlaceRef {
- llval: bx.pointercast(byte_ptr, bx.cx().ptr_to(ll_fty)),
+ llval: bx.pointercast(byte_ptr, bx.cx().type_ptr_to(ll_fty)),
llextra: self.llextra,
layout: field,
align: effective_field_align,
bx.sess().target.target.arch == "aarch64" {
// Issue #34427: As workaround for LLVM bug on ARM,
// use memset of 0 before assigning niche value.
- let llptr = bx.pointercast(self.llval, bx.cx().ptr_to(bx.cx().i8()));
+ let llptr = bx.pointercast(
+ self.llval,
+ bx.cx().type_ptr_to(bx.cx().type_i8())
+ );
let fill_byte = bx.cx().const_u8(0);
let (size, align) = self.layout.size_and_align();
let size = bx.cx().const_usize(size.bytes());
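+ // Illustrative: the whole place is memset to zero through an `i8*` view
+ // of `self.llval` before the niche value is stored, sidestepping the
+ // AArch64 LLVM bug tracked in issue #34427.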
// Cast to the appropriate variant struct type.
let variant_ty = downcast.layout.llvm_type(bx.cx());
- downcast.llval = bx.pointercast(downcast.llval, bx.cx().ptr_to(variant_ty));
+ downcast.llval = bx.pointercast(downcast.llval, bx.cx().type_ptr_to(variant_ty));
downcast
}
// so we generate an abort
let fnname = bx.cx().get_intrinsic(&("llvm.trap"));
bx.call(fnname, &[], None);
- let llval = bx.cx().const_undef(bx.cx().ptr_to(layout.llvm_type(bx.cx())));
+ let llval = bx.cx().const_undef(
+ bx.cx().type_ptr_to(layout.llvm_type(bx.cx()))
+ );
PlaceRef::new_sized(llval, layout, layout.align)
}
}
// Cast the place pointer type to the new
// array or slice type (*[%_; new_len]).
subslice.llval = bx.pointercast(subslice.llval,
- bx.cx().ptr_to(subslice.layout.llvm_type(bx.cx())));
+ bx.cx().type_ptr_to(subslice.layout.llvm_type(bx.cx())));
subslice
}
// Use llvm.memset.p0i8.* to initialize byte arrays
let v = base::from_immediate(&bx, v);
- if bx.cx().val_ty(v) == bx.cx().i8() {
+ if bx.cx().val_ty(v) == bx.cx().type_i8() {
base::call_memset(&bx, start, v, size, align, false);
return bx;
}
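+ // Illustrative: a repeat like `[b; N]` whose element is already an `i8`
+ // takes this path, so one `llvm.memset.p0i8.*` call replaces an
+ // element-by-element store loop.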
impl TypeMethods for CodegenCx<'ll, 'tcx> {
- fn void(&self) -> &'ll Type {
+ fn type_void(&self) -> &'ll Type {
unsafe {
llvm::LLVMVoidTypeInContext(self.llcx)
}
}
- fn metadata(&self) -> &'ll Type {
+ fn type_metadata(&self) -> &'ll Type {
unsafe {
llvm::LLVMRustMetadataTypeInContext(self.llcx)
}
}
- fn i1(&self) -> &'ll Type {
+ fn type_i1(&self) -> &'ll Type {
unsafe {
llvm::LLVMInt1TypeInContext(self.llcx)
}
}
- fn i8(&self) -> &'ll Type {
+ fn type_i8(&self) -> &'ll Type {
unsafe {
llvm::LLVMInt8TypeInContext(self.llcx)
}
}
- fn i16(&self) -> &'ll Type {
+ fn type_i16(&self) -> &'ll Type {
unsafe {
llvm::LLVMInt16TypeInContext(self.llcx)
}
}
- fn i32(&self) -> &'ll Type {
+ fn type_i32(&self) -> &'ll Type {
unsafe {
llvm::LLVMInt32TypeInContext(self.llcx)
}
}
- fn i64(&self) -> &'ll Type {
+ fn type_i64(&self) -> &'ll Type {
unsafe {
llvm::LLVMInt64TypeInContext(self.llcx)
}
}
- fn i128(&self) -> &'ll Type {
+ fn type_i128(&self) -> &'ll Type {
unsafe {
llvm::LLVMIntTypeInContext(self.llcx, 128)
}
}
// Creates an integer type with the given number of bits, e.g. i24
- fn ix(&self, num_bits: u64) -> &'ll Type {
+ fn type_ix(&self, num_bits: u64) -> &'ll Type {
unsafe {
llvm::LLVMIntTypeInContext(self.llcx, num_bits as c_uint)
}
}
- fn f32(&self) -> &'ll Type {
+ fn type_f32(&self) -> &'ll Type {
unsafe {
llvm::LLVMFloatTypeInContext(self.llcx)
}
}
- fn f64(&self) -> &'ll Type {
+ fn type_f64(&self) -> &'ll Type {
unsafe {
llvm::LLVMDoubleTypeInContext(self.llcx)
}
}
- fn x86_mmx(&self) -> &'ll Type {
+ fn type_x86_mmx(&self) -> &'ll Type {
unsafe {
llvm::LLVMX86MMXTypeInContext(self.llcx)
}
}
- fn func(
+ fn type_func(
&self,
args: &[&'ll Type],
ret: &'ll Type
}
}
- fn variadic_func(
+ fn type_variadic_func(
&self,
args: &[&'ll Type],
ret: &'ll Type
}
}
- fn struct_(
+ fn type_struct(
&self,
els: &[&'ll Type],
packed: bool
}
}
- fn named_struct(&self, name: &str) -> &'ll Type {
+ fn type_named_struct(&self, name: &str) -> &'ll Type {
let name = SmallCStr::new(name);
unsafe {
llvm::LLVMStructCreateNamed(self.llcx, name.as_ptr())
}
- fn array(&self, ty: &'ll Type, len: u64) -> &'ll Type {
+ fn type_array(&self, ty: &'ll Type, len: u64) -> &'ll Type {
unsafe {
llvm::LLVMRustArrayType(ty, len)
}
}
- fn vector(&self, ty: &'ll Type, len: u64) -> &'ll Type {
+ fn type_vector(&self, ty: &'ll Type, len: u64) -> &'ll Type {
unsafe {
llvm::LLVMVectorType(ty, len as c_uint)
}
}
- fn kind(&self, ty: &'ll Type) -> TypeKind {
+ fn type_kind(&self, ty: &'ll Type) -> TypeKind {
unsafe {
llvm::LLVMRustGetTypeKind(ty)
}
}
}
- fn ptr_to(&self, ty: &'ll Type) -> &'ll Type {
- assert_ne!(self.kind(ty), TypeKind::Function,
+ fn type_ptr_to(&self, ty: &'ll Type) -> &'ll Type {
+ assert_ne!(self.type_kind(ty), TypeKind::Function,
"don't call ptr_to on function types, use ptr_to_llvm_type on FnType instead");
unsafe {
llvm::LLVMPointerType(ty, 0)
}
}
- fn func_params(&self, ty: &'ll Type) -> Vec<&'ll Type> {
+ fn func_params_types(&self, ty: &'ll Type) -> Vec<&'ll Type> {
unsafe {
let n_args = llvm::LLVMCountParamTypes(ty) as usize;
let mut args = Vec::with_capacity(n_args);
}
fn float_width(&self, ty : &'ll Type) -> usize {
- match self.kind(ty) {
+ match self.type_kind(ty) {
TypeKind::Float => 32,
TypeKind::Double => 64,
TypeKind::X86_FP80 => 80,
}
pub fn i8p_llcx(cx : &write::CodegenContext<'ll>, llcx: &'ll llvm::Context) -> &'ll Type {
- cx.ptr_to(Type::i8_llcx(llcx))
+ cx.type_ptr_to(Type::i8_llcx(llcx))
}
}
impl CodegenCx<'ll, 'tcx> {
- pub fn bool(&self) -> &'ll Type {
- self.i8()
+ pub fn type_bool(&self) -> &'ll Type {
+ self.type_i8()
}
- pub fn i8p(&self) -> &'ll Type {
- self.ptr_to(self.i8())
+ pub fn type_i8p(&self) -> &'ll Type {
+ self.type_ptr_to(self.type_i8())
}
- pub fn isize(&self) -> &'ll Type {
+ pub fn type_isize(&self) -> &'ll Type {
self.isize_ty
}
- pub fn t_int(&self) -> &'ll Type {
+ pub fn type_int(&self) -> &'ll Type {
match &self.sess().target.target.target_c_int_width[..] {
- "16" => self.i16(),
- "32" => self.i32(),
- "64" => self.i64(),
+ "16" => self.type_i16(),
+ "32" => self.type_i32(),
+ "64" => self.type_i64(),
width => bug!("Unsupported target_c_int_width: {}", width),
}
}
- pub fn int_from_ty(
+ pub fn type_int_from_ty(
&self,
t: ast::IntTy
) -> &'ll Type {
match t {
ast::IntTy::Isize => self.isize_ty,
- ast::IntTy::I8 => self.i8(),
- ast::IntTy::I16 => self.i16(),
- ast::IntTy::I32 => self.i32(),
- ast::IntTy::I64 => self.i64(),
- ast::IntTy::I128 => self.i128(),
+ ast::IntTy::I8 => self.type_i8(),
+ ast::IntTy::I16 => self.type_i16(),
+ ast::IntTy::I32 => self.type_i32(),
+ ast::IntTy::I64 => self.type_i64(),
+ ast::IntTy::I128 => self.type_i128(),
}
}
- pub fn uint_from_ty(
+ pub fn type_uint_from_ty(
&self,
t: ast::UintTy
) -> &'ll Type {
match t {
ast::UintTy::Usize => self.isize_ty,
- ast::UintTy::U8 => self.i8(),
- ast::UintTy::U16 => self.i16(),
- ast::UintTy::U32 => self.i32(),
- ast::UintTy::U64 => self.i64(),
- ast::UintTy::U128 => self.i128(),
+ ast::UintTy::U8 => self.type_i8(),
+ ast::UintTy::U16 => self.type_i16(),
+ ast::UintTy::U32 => self.type_i32(),
+ ast::UintTy::U64 => self.type_i64(),
+ ast::UintTy::U128 => self.type_i128(),
}
}
- pub fn float_from_ty(
+ pub fn type_float_from_ty(
&self,
t: ast::FloatTy
) -> &'ll Type {
match t {
- ast::FloatTy::F32 => self.f32(),
- ast::FloatTy::F64 => self.f64(),
+ ast::FloatTy::F32 => self.type_f32(),
+ ast::FloatTy::F64 => self.type_f64(),
}
}
- pub fn from_integer(&self, i: layout::Integer) -> &'ll Type {
+ pub fn type_from_integer(&self, i: layout::Integer) -> &'ll Type {
use rustc::ty::layout::Integer::*;
match i {
- I8 => self.i8(),
- I16 => self.i16(),
- I32 => self.i32(),
- I64 => self.i64(),
- I128 => self.i128(),
+ I8 => self.type_i8(),
+ I16 => self.type_i16(),
+ I32 => self.type_i32(),
+ I64 => self.type_i64(),
+ I128 => self.type_i128(),
}
}
/// Return a LLVM type that has at most the required alignment,
/// as a conservative approximation for unknown pointee types.
- pub fn pointee_for_abi_align(&self, align: Align) -> &'ll Type {
+ pub fn type_pointee_for_abi_align(&self, align: Align) -> &'ll Type {
// FIXME(eddyb) We could find a better approximation if ity.align < align.
let ity = layout::Integer::approximate_abi_align(self, align);
- self.from_integer(ity)
+ self.type_from_integer(ity)
}
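+ // Illustrative: a 4-byte alignment picks `i32` as the conservative
+ // pointee type, rather than falling all the way back to `i8`.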
/// Return a LLVM type that has at most the required alignment,
/// and exactly the required size, as a best-effort padding array.
- pub fn padding_filler(&self, size: Size, align: Align) -> &'ll Type {
+ pub fn type_padding_filler(
+ &self,
+ size: Size,
+ align: Align
+ ) -> &'ll Type {
let unit = layout::Integer::approximate_abi_align(self, align);
let size = size.bytes();
let unit_size = unit.size().bytes();
assert_eq!(size % unit_size, 0);
- self.array(self.from_integer(unit), size / unit_size)
+ self.type_array(self.type_from_integer(unit), size / unit_size)
}
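+ // Worked example: size = 12 bytes, align = 4 gives unit = i32 and the
+ // filler `[3 x i32]`: at most 4-byte alignment, exactly 12 bytes.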
}
(cx.sess().target.target.arch == "x86" ||
cx.sess().target.target.arch == "x86_64");
if use_x86_mmx {
- return cx.x86_mmx()
+ return cx.type_x86_mmx()
} else {
let element = layout.scalar_llvm_type_at(cx, element, Size::ZERO);
- return cx.vector(element, count);
+ return cx.type_vector(element, count);
}
}
layout::Abi::ScalarPair(..) => {
- return cx.struct_( &[
+ return cx.type_struct(&[
layout.scalar_pair_element_llvm_type(cx, 0, false),
layout.scalar_pair_element_llvm_type(cx, 1, false),
], false);
match layout.fields {
layout::FieldPlacement::Union(_) => {
- let fill = cx.padding_filler( layout.size, layout.align);
+ let fill = cx.type_padding_filler(layout.size, layout.align);
let packed = false;
match name {
None => {
- cx.struct_( &[fill], packed)
+ cx.type_struct(&[fill], packed)
}
Some(ref name) => {
- let llty = cx.named_struct( name);
+ let llty = cx.type_named_struct(name);
cx.set_struct_body(llty, &[fill], packed);
llty
}
}
}
layout::FieldPlacement::Array { count, .. } => {
- cx.array(layout.field(cx, 0).llvm_type(cx), count)
+ cx.type_array(layout.field(cx, 0).llvm_type(cx), count)
}
layout::FieldPlacement::Arbitrary { .. } => {
match name {
None => {
let (llfields, packed) = struct_llfields(cx, layout);
- cx.struct_( &llfields, packed)
+ cx.type_struct(&llfields, packed)
}
Some(ref name) => {
- let llty = cx.named_struct( name);
+ let llty = cx.type_named_struct(name);
*defer = Some((llty, layout));
llty
}
let padding = target_offset - offset;
let padding_align = prev_effective_align.min(effective_field_align);
assert_eq!(offset.abi_align(padding_align) + padding, target_offset);
- result.push(cx.padding_filler( padding, padding_align));
+ result.push(cx.type_padding_filler(padding, padding_align));
debug!(" padding before: {:?}", padding);
result.push(field.llvm_type(cx));
assert_eq!(offset.abi_align(padding_align) + padding, layout.size);
debug!("struct_llfields: pad_bytes: {:?} offset: {:?} stride: {:?}",
padding, offset, layout.size);
- result.push(cx.padding_filler(padding, padding_align));
+ result.push(cx.type_padding_filler(padding, padding_align));
assert_eq!(result.len(), 1 + field_count * 2);
} else {
debug!("struct_llfields: offset: {:?} stride: {:?}",
let llty = match self.ty.sty {
ty::Ref(_, ty, _) |
ty::RawPtr(ty::TypeAndMut { ty, .. }) => {
- cx.ptr_to(cx.layout_of(ty).llvm_type(cx))
+ cx.type_ptr_to(cx.layout_of(ty).llvm_type(cx))
}
ty::Adt(def, _) if def.is_box() => {
- cx.ptr_to(cx.layout_of(self.ty.boxed_ty()).llvm_type(cx))
+ cx.type_ptr_to(cx.layout_of(self.ty.boxed_ty()).llvm_type(cx))
}
ty::FnPtr(sig) => {
let sig = cx.tcx.normalize_erasing_late_bound_regions(
fn immediate_llvm_type<'a>(&self, cx: &CodegenCx<'a, 'tcx>) -> &'a Type {
if let layout::Abi::Scalar(ref scalar) = self.abi {
if scalar.is_bool() {
- return cx.i1();
+ return cx.type_i1();
}
}
self.llvm_type(cx)
fn scalar_llvm_type_at<'a>(&self, cx: &CodegenCx<'a, 'tcx>,
scalar: &layout::Scalar, offset: Size) -> &'a Type {
match scalar.value {
- layout::Int(i, _) => cx.from_integer( i),
- layout::Float(FloatTy::F32) => cx.f32(),
- layout::Float(FloatTy::F64) => cx.f64(),
+ layout::Int(i, _) => cx.type_from_integer(i),
+ layout::Float(FloatTy::F32) => cx.type_f32(),
+ layout::Float(FloatTy::F64) => cx.type_f64(),
layout::Pointer => {
// If we know the alignment, pick something better than i8.
let pointee = if let Some(pointee) = self.pointee_info_at(cx, offset) {
- cx.pointee_for_abi_align( pointee.align)
+ cx.type_pointee_for_abi_align(pointee.align)
} else {
- cx.i8()
+ cx.type_i8()
};
- cx.ptr_to(pointee)
+ cx.type_ptr_to(pointee)
}
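+ // Illustrative: a pointer scalar whose pointee is known to be 8-byte
+ // aligned lowers to `i64*`; with no pointee info it falls back to `i8*`.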
}
}
// when immediate. We need to load/store `bool` as `i8` to avoid
// crippling LLVM optimizations or triggering other LLVM bugs with `i1`.
if immediate && scalar.is_bool() {
- return cx.i1();
+ return cx.type_i1();
}
let offset = if index == 0 {