self.layout.align,
bx.pointercast(llscratch, Type::i8p(cx)),
scratch_align,
- CodegenCx::c_usize(cx, self.layout.size.bytes()),
+ cx.c_usize(self.layout.size.bytes()),
MemFlags::empty());
bx.lifetime_end(llscratch, scratch_size);
let kind = llvm::LLVMGetMDKindIDInContext(bx.cx().llcx,
key.as_ptr() as *const c_char, key.len() as c_uint);
- let val: &'ll Value = CodegenCx::c_i32(bx.cx(), ia.ctxt.outer().as_u32() as i32);
+ let val: &'ll Value = bx.cx().c_i32(ia.ctxt.outer().as_u32() as i32);
llvm::LLVMSetMetadata(r, kind,
llvm::LLVMMDNodeInContext(bx.cx().llcx, &val, 1));
}
}
-pub fn compare_simd_types(
- bx: &Builder<'a, 'll, 'tcx>,
- lhs: &'ll Value,
- rhs: &'ll Value,
+pub fn compare_simd_types<'a, 'll: 'a, 'tcx: 'll, Builder: BuilderMethods<'a, 'll, 'tcx>>(
+ bx: &Builder,
+ lhs: Builder::Value,
+ rhs: Builder::Value,
t: Ty<'tcx>,
- ret_ty: &'ll Type,
+ ret_ty: Builder::Type,
op: hir::BinOpKind
-) -> &'ll Value {
+) -> Builder::Value {
let signed = match t.sty {
ty::Float(_) => {
let cmp = bin_op_to_fcmp_predicate(op);
let (source, target) = cx.tcx.struct_lockstep_tails(source, target);
match (&source.sty, &target.sty) {
(&ty::Array(_, len), &ty::Slice(_)) => {
- CodegenCx::c_usize(cx, len.unwrap_usize(cx.tcx))
+ cx.c_usize(len.unwrap_usize(cx.tcx))
}
(&ty::Dynamic(..), &ty::Dynamic(..)) => {
// For now, upcasts are limited to changes in marker
return;
}
- call_memcpy(bx, dst, dst_align, src, src_align, CodegenCx::c_usize(bx.cx(), size), flags);
+ call_memcpy(bx, dst, dst_align, src, src_align, bx.cx().c_usize(size), flags);
}
pub fn call_memset(
let ptr_width = &bx.cx().sess().target.target.target_pointer_width;
let intrinsic_key = format!("llvm.memset.p0i8.i{}", ptr_width);
let llintrinsicfn = bx.cx().get_intrinsic(&intrinsic_key);
- let volatile = CodegenCx::c_bool(bx.cx(), volatile);
+ let volatile = bx.cx().c_bool(volatile);
bx.call(llintrinsicfn, &[ptr, fill_byte, size, align, volatile], None)
}
// *always* point to a metadata value of the integer 1.
//
// [1]: http://llvm.org/docs/LangRef.html#store-instruction
- let one = CodegenCx::c_i32(self.cx, 1);
+ let one = self.cx.c_i32(1);
let node = llvm::LLVMMDNodeInContext(self.cx.llcx, &one, 1);
llvm::LLVMSetMetadata(store, llvm::MD_nontemporal as c_uint, node);
}
unsafe {
let elt_ty = self.cx.val_ty(elt);
let undef = llvm::LLVMGetUndef(type_::Type::vector(elt_ty, num_elts as u64));
- let vec = self.insert_element(undef, elt, CodegenCx::c_i32(self.cx, 0));
+ let vec = self.insert_element(undef, elt, self.cx.c_i32(0));
let vec_i32_ty = type_::Type::vector(type_::Type::i32(self.cx), num_elts as u64);
self.shuffle_vector(vec, undef, self.cx.c_null(vec_i32_ty))
}
let lifetime_intrinsic = self.cx.get_intrinsic(intrinsic);
let ptr = self.pointercast(ptr, type_::Type::i8p(self.cx));
- self.call(lifetime_intrinsic, &[CodegenCx::c_u64(self.cx, size), ptr], None);
+ self.call(lifetime_intrinsic, &[self.cx.c_u64(size), ptr], None);
}
fn call(&self, llfn: &'ll Value, args: &[&'ll Value],
ty.is_freeze(tcx, ty::ParamEnv::reveal_all(), DUMMY_SP)
}
-pub struct OperandBundleDef<'a, Value : 'a> {
+pub struct OperandBundleDef<'a, Value: 'a> {
pub name: &'a str,
pub val: Value
}
type Context = &'ll llvm::Context;
}
-impl<'ll, 'tcx : 'll> CommonMethods for CodegenCx<'ll, 'tcx> {
+impl<'ll, 'tcx: 'll> CommonMethods for CodegenCx<'ll, 'tcx> {
// LLVM constant constructors.
fn c_null(&self, t: &'ll Type) -> &'ll Value {
}
}
-impl<'ll, 'tcx : 'll> CommonWriteMethods for CodegenCx<'ll, 'tcx> {
+impl<'ll, 'tcx: 'll> CommonWriteMethods for CodegenCx<'ll, 'tcx> {
fn val_ty(&self, v: &'ll Value) -> &'ll Type {
val_ty(v)
}
let gdb_debug_scripts_section = get_or_insert_gdb_debug_scripts_section_global(bx.cx());
// Load just the first byte as that's all that's necessary to force
// LLVM to keep around the reference to the global.
- let indices = [CodegenCx::c_i32(bx.cx(), 0), CodegenCx::c_i32(bx.cx(), 0)];
+ let indices = [bx.cx().c_i32(0), bx.cx().c_i32(0)];
let element = bx.inbounds_gep(gdb_debug_scripts_section, &indices);
let volative_load_instruction = bx.volatile_load(element);
unsafe {
bug!("symbol `{}` is already defined", section_var_name)
});
llvm::LLVMSetSection(section_var, section_name.as_ptr() as *const _);
- llvm::LLVMSetInitializer(section_var, CodegenCx::c_bytes(cx, section_contents));
+ llvm::LLVMSetInitializer(section_var, cx.c_bytes(section_contents));
llvm::LLVMSetGlobalConstant(section_var, llvm::True);
llvm::LLVMSetUnnamedAddr(section_var, llvm::True);
llvm::LLVMRustSetLinkage(section_var, llvm::Linkage::LinkOnceODRLinkage);
use builder::Builder;
use common::*;
-use context::CodegenCx;
use meth;
use rustc::ty::layout::LayoutOf;
use rustc::ty::{self, Ty};
let sized_align = layout.align.abi();
debug!("DST {} statically sized prefix size: {} align: {}",
t, sized_size, sized_align);
- let sized_size = CodegenCx::c_usize(cx, sized_size);
- let sized_align = CodegenCx::c_usize(cx, sized_align);
+ let sized_size = cx.c_usize(sized_size);
+ let sized_align = cx.c_usize(sized_align);
// Recurse to get the size of the dynamically sized field (must be
// the last field).
(Some(sized_align), Some(unsized_align)) => {
// If both alignments are constant, (the sized_align should always be), then
// pick the correct alignment statically.
- CodegenCx::c_usize(cx, std::cmp::max(sized_align, unsized_align) as u64)
+ cx.c_usize(std::cmp::max(sized_align, unsized_align) as u64)
}
_ => bx.select(bx.icmp(IntPredicate::IntUGT, sized_align, unsized_align),
sized_align,
//
// `(size + (align-1)) & -align`
- let addend = bx.sub(align, CodegenCx::c_usize(bx.cx(), 1));
+ let addend = bx.sub(align, bx.cx().c_usize(1));
let size = bx.and(bx.add(size, addend), bx.neg(align));
(size, align)
// option. This file may not be copied, modified, or distributed
// except according to those terms.
+use std::fmt::Debug;
+
pub trait Backend {
- type Value;
+ type Value: Debug + PartialEq;
type BasicBlock;
type Type;
type Context;
-pub trait BuilderMethods<'a, 'll :'a, 'tcx: 'll> : Backend {
+pub trait BuilderMethods<'a, 'll: 'a, 'tcx: 'll>: Backend {
fn new_block<'b>(
cx: &'a CodegenCx<'ll, 'tcx, Self::Value>,
typ: &str,
llfn: Self::Value,
args: &'b [Self::Value]
- ) -> Cow<'b, [Self::Value]> where [Self::Value] : ToOwned;
+ ) -> Cow<'b, [Self::Value]> where [Self::Value]: ToOwned;
fn lifetime_start(&self, ptr: Self::Value, size: Size);
fn lifetime_end(&self, ptr: Self::Value, size: Size);
use super::Backend;
use syntax::symbol::LocalInternedString;
-pub trait CommonMethods : Backend + CommonWriteMethods {
+pub trait CommonMethods: Backend + CommonWriteMethods {
// Constant constructors
fn c_null(&self, t: Self::Type) -> Self::Value;
fn c_undef(&self, t: Self::Type) -> Self::Value;
fn const_get_elt(&self, v: Self::Value, idx: u64) -> Self::Value;
fn const_get_real(&self, v: Self::Value) -> Option<(f64, bool)>;
fn const_to_uint(&self, v: Self::Value) -> u64;
+ fn const_to_opt_u128(&self, v: Self::Value, sign_ext: bool) -> Option<u128>;
+
fn is_const_integral(&self, v: Self::Value) -> bool;
fn is_const_real(&self, v: Self::Value) -> bool;
- fn const_to_opt_u128(&self, v: Self::Value, sign_ext: bool) -> Option<u128>;
}
-pub trait CommonWriteMethods : Backend {
+pub trait CommonWriteMethods: Backend {
fn val_ty(&self, v: Self::Value) -> Self::Type;
fn c_bytes_in_context(&self, llcx: Self::Context, bytes: &[u8]) -> Self::Value;
fn c_struct_in_context(
}
impl AtomicRmwBinOp {
- pub fn from_generic(op : common::AtomicRmwBinOp) -> Self {
+ pub fn from_generic(op: common::AtomicRmwBinOp) -> Self {
match op {
common::AtomicRmwBinOp::AtomicXchg => AtomicRmwBinOp::AtomicXchg,
common::AtomicRmwBinOp::AtomicAdd => AtomicRmwBinOp::AtomicAdd,
}
impl AtomicOrdering {
- pub fn from_generic(ao : common::AtomicOrdering) -> Self {
+ pub fn from_generic(ao: common::AtomicOrdering) -> Self {
match ao {
common::AtomicOrdering::NotAtomic => AtomicOrdering::NotAtomic,
common::AtomicOrdering::Unordered => AtomicOrdering::Unordered,
}
impl SynchronizationScope {
- pub fn from_generic(sc : common::SynchronizationScope) -> Self {
+ pub fn from_generic(sc: common::SynchronizationScope) -> Self {
match sc {
common::SynchronizationScope::Other => SynchronizationScope::Other,
common::SynchronizationScope::SingleThread => SynchronizationScope::SingleThread,
}
impl AsmDialect {
- pub fn from_generic(asm : syntax::ast::AsmDialect) -> Self {
+ pub fn from_generic(asm: syntax::ast::AsmDialect) -> Self {
match asm {
syntax::ast::AsmDialect::Att => AsmDialect::Att,
syntax::ast::AsmDialect::Intel => AsmDialect::Intel
OperandBundleDef { raw: def }
}
- pub fn from_generic(bundle : &common::OperandBundleDef<'a, &'a Value>) -> Self {
+ pub fn from_generic(bundle: &common::OperandBundleDef<'a, &'a Value>) -> Self {
Self::new(bundle.name, &[bundle.val])
}
}
let llvtable = bx.pointercast(llvtable, fn_ty.ptr_to_llvm_type(bx.cx()).ptr_to());
let ptr_align = bx.tcx().data_layout.pointer_align;
let ptr = bx.load(
- bx.inbounds_gep(llvtable, &[CodegenCx::c_usize(bx.cx(), self.0)]),
+ bx.inbounds_gep(llvtable, &[bx.cx().c_usize(self.0)]),
ptr_align
);
bx.nonnull_metadata(ptr);
let llvtable = bx.pointercast(llvtable, Type::isize(bx.cx()).ptr_to());
let usize_align = bx.tcx().data_layout.pointer_align;
let ptr = bx.load(
- bx.inbounds_gep(llvtable, &[CodegenCx::c_usize(bx.cx(), self.0)]),
+ bx.inbounds_gep(llvtable, &[bx.cx().c_usize(self.0)]),
usize_align
);
// Vtable loads are invariant
// /////////////////////////////////////////////////////////////////////////////////////////////
let components: Vec<_> = [
callee::get_fn(cx, monomorphize::resolve_drop_in_place(cx.tcx, ty)),
- CodegenCx::c_usize(cx, size.bytes()),
- CodegenCx::c_usize(cx, align.abi())
+ cx.c_usize(size.bytes()),
+ cx.c_usize(align.abi())
].iter().cloned().chain(methods).collect();
- let vtable_const = CodegenCx::c_struct(cx, &components, false);
+ let vtable_const = cx.c_struct(&components, false);
let align = cx.data_layout().pointer_align;
let vtable = consts::addr_of(cx, vtable_const, align, Some("vtable"));
use callee;
use builder::{Builder, MemFlags};
use common::{self, IntPredicate};
-use context::CodegenCx;
use consts;
use meth;
use monomorphize;
// Pass the condition through llvm.expect for branch hinting.
let expect = bx.cx().get_intrinsic(&"llvm.expect.i1");
- let cond = bx.call(expect, &[cond, CodegenCx::c_bool(bx.cx(), expected)], None);
+ let cond = bx.call(expect, &[cond, bx.cx().c_bool(expected)], None);
// Create the failure block and the conditional branch to it.
let lltarget = llblock(self, target);
// Get the location information.
let loc = bx.sess().source_map().lookup_char_pos(span.lo());
let filename = Symbol::intern(&loc.file.name.to_string()).as_str();
- let filename = CodegenCx::c_str_slice(bx.cx(), filename);
- let line = CodegenCx::c_u32(bx.cx(), loc.line as u32);
- let col = CodegenCx::c_u32(bx.cx(), loc.col.to_usize() as u32 + 1);
+ let filename = bx.cx().c_str_slice(filename);
+ let line = bx.cx().c_u32(loc.line as u32);
+ let col = bx.cx().c_u32(loc.col.to_usize() as u32 + 1);
let align = tcx.data_layout.aggregate_align
.max(tcx.data_layout.i32_align)
.max(tcx.data_layout.pointer_align);
let len = self.codegen_operand(&mut bx, len).immediate();
let index = self.codegen_operand(&mut bx, index).immediate();
- let file_line_col = CodegenCx::c_struct(bx.cx(),
- &[filename, line, col], false);
+ let file_line_col = bx.cx().c_struct(&[filename, line, col], false);
let file_line_col = consts::addr_of(bx.cx(),
file_line_col,
align,
_ => {
let str = msg.description();
let msg_str = Symbol::intern(str).as_str();
- let msg_str = CodegenCx::c_str_slice(bx.cx(), msg_str);
- let msg_file_line_col = CodegenCx::c_struct(
- bx.cx(),
+ let msg_str = bx.cx().c_str_slice(msg_str);
+ let msg_file_line_col = bx.cx().c_struct(
&[msg_str, filename, line, col],
false
);
assert_eq!(offset as usize as u64, offset);
let offset = offset as usize;
if offset > next_offset {
- llvals.push(CodegenCx::c_bytes(cx, &alloc.bytes[next_offset..offset]));
+ llvals.push(cx.c_bytes(&alloc.bytes[next_offset..offset]));
}
let ptr_offset = read_target_uint(
dl.endian,
next_offset = offset + pointer_size;
}
if alloc.bytes.len() >= next_offset {
- llvals.push(CodegenCx::c_bytes(cx, &alloc.bytes[next_offset ..]));
+ llvals.push(cx.c_bytes(&alloc.bytes[next_offset ..]));
}
- CodegenCx::c_struct(cx, &llvals, true)
+ cx.c_struct(&llvals, true)
}
pub fn codegen_static_initializer(
bug!("simd shuffle field {:?}", field)
}
}).collect();
- let llval = CodegenCx::c_struct(bx.cx(), &values?, false);
+ let llval = bx.cx().c_struct(&values?, false);
Ok((llval, c.ty))
})
.unwrap_or_else(|_| {
// it's null. The 64 here is actually a bitfield which
// represents that this is a catch-all block.
let null = bx.cx().c_null(Type::i8p(bx.cx()));
- let sixty_four = CodegenCx::c_i32(bx.cx(), 64);
+ let sixty_four = bx.cx().c_i32(64);
cleanup = cp_bx.catch_pad(cs, &[null, sixty_four, null]);
cp_bx.br(llbb);
}