#![allow(dead_code)] // FFI wrappers
use llvm;
-use llvm::{CallConv, AtomicBinOp, AtomicOrdering, AsmDialect};
+use llvm::{CallConv, AtomicBinOp, AtomicOrdering, AsmDialect, AttrBuilder};
use llvm::{Opcode, IntPredicate, RealPredicate, False};
use llvm::{ValueRef, BasicBlockRef, BuilderRef, ModuleRef};
use middle::trans::base;
use std::string::String;
use syntax::codemap::Span;
-pub struct Builder<'a> {
+pub struct Builder<'a, 'tcx: 'a> {
// Raw handle to the LLVM IR builder; all instruction-emitting methods
// (store, call, invoke, ...) thread through this.
pub llbuilder: BuilderRef,
// Reference to the crate-wide translation context. The patch threads a
// second lifetime ('tcx) through CrateContext, so the field gains the
// explicit <'a, 'tcx> parameters to match.
- pub ccx: &'a CrateContext,
+ pub ccx: &'a CrateContext<'a, 'tcx>,
}
// This is a really awful way to get a zero-length c-string, but better (and a
&cnull as *const c_char
}
-impl<'a> Builder<'a> {
// Constructs a Builder wrapping the crate context's shared LLVM builder.
// The patch replaces direct field access (ccx.builder.b) with the
// raw_builder() accessor, matching the accessor-based CrateContext API
// used elsewhere in this diff (tn(), stats(), llcx()).
- pub fn new(ccx: &'a CrateContext) -> Builder<'a> {
+ pub fn new(ccx: &'a CrateContext<'a, 'tcx>) -> Builder<'a, 'tcx> {
Builder {
- llbuilder: ccx.builder.b,
+ llbuilder: ccx.raw_builder(),
ccx: ccx,
}
}
pub fn count_insn(&self, category: &str) {
if self.ccx.sess().trans_stats() {
- self.ccx.stats.n_llvm_insns.set(self.ccx
- .stats
+ self.ccx.stats().n_llvm_insns.set(self.ccx
+ .stats()
.n_llvm_insns
.get() + 1);
}
+ self.ccx.count_llvm_insn();
if self.ccx.sess().count_llvm_insns() {
base::with_insn_ctxt(|v| {
- let mut h = self.ccx.stats.llvm_insns.borrow_mut();
+ let mut h = self.ccx.stats().llvm_insns.borrow_mut();
// Build version of path with cycles removed.
args: &[ValueRef],
then: BasicBlockRef,
catch: BasicBlockRef,
- attributes: &[(uint, u64)])
+ attributes: Option<AttrBuilder>)
-> ValueRef {
self.count_insn("invoke");
debug!("Invoke {} with args ({})",
- self.ccx.tn.val_to_string(llfn),
+ self.ccx.tn().val_to_string(llfn),
args.iter()
- .map(|&v| self.ccx.tn.val_to_string(v))
+ .map(|&v| self.ccx.tn().val_to_string(v))
.collect::<Vec<String>>()
.connect(", "));
then,
catch,
noname());
- for &(idx, attr) in attributes.iter() {
- llvm::LLVMAddCallSiteAttribute(v, idx as c_uint, attr);
+ match attributes {
+ Some(a) => a.apply_callsite(v),
+ None => {}
}
v
}
let v = [min, max];
llvm::LLVMSetMetadata(value, llvm::MD_range as c_uint,
- llvm::LLVMMDNodeInContext(self.ccx.llcx,
+ llvm::LLVMMDNodeInContext(self.ccx.llcx(),
v.as_ptr(), v.len() as c_uint));
}
pub fn store(&self, val: ValueRef, ptr: ValueRef) {
debug!("Store {} -> {}",
- self.ccx.tn.val_to_string(val),
- self.ccx.tn.val_to_string(ptr));
+ self.ccx.tn().val_to_string(val),
+ self.ccx.tn().val_to_string(ptr));
assert!(self.llbuilder.is_not_null());
self.count_insn("store");
unsafe {
pub fn volatile_store(&self, val: ValueRef, ptr: ValueRef) {
debug!("Store {} -> {}",
- self.ccx.tn.val_to_string(val),
- self.ccx.tn.val_to_string(ptr));
+ self.ccx.tn().val_to_string(val),
+ self.ccx.tn().val_to_string(ptr));
assert!(self.llbuilder.is_not_null());
self.count_insn("store.volatile");
unsafe {
pub fn atomic_store(&self, val: ValueRef, ptr: ValueRef, order: AtomicOrdering) {
debug!("Store {} -> {}",
- self.ccx.tn.val_to_string(val),
- self.ccx.tn.val_to_string(ptr));
+ self.ccx.tn().val_to_string(val),
+ self.ccx.tn().val_to_string(ptr));
self.count_insn("store.atomic");
unsafe {
let ty = Type::from_ref(llvm::LLVMTypeOf(ptr));
// we care about.
if ixs.len() < 16 {
let mut small_vec = [ C_i32(self.ccx, 0), ..16 ];
- for (small_vec_e, &ix) in small_vec.mut_iter().zip(ixs.iter()) {
+ for (small_vec_e, &ix) in small_vec.iter_mut().zip(ixs.iter()) {
*small_vec_e = C_i32(self.ccx, ix as i32);
}
self.inbounds_gep(base, small_vec.slice(0, ixs.len()))
c, noname(), False, False)
}
});
- self.call(asm, [], []);
+ self.call(asm, [], None);
}
}
else { llvm::False };
let argtys = inputs.iter().map(|v| {
- debug!("Asm Input Type: {:?}", self.ccx.tn.val_to_string(*v));
+ debug!("Asm Input Type: {:?}", self.ccx.tn().val_to_string(*v));
val_ty(*v)
}).collect::<Vec<_>>();
- debug!("Asm Output Type: {:?}", self.ccx.tn.type_to_string(output));
+ debug!("Asm Output Type: {:?}", self.ccx.tn().type_to_string(output));
let fty = Type::func(argtys.as_slice(), &output);
unsafe {
let v = llvm::LLVMInlineAsm(
fty.to_ref(), asm, cons, volatile, alignstack, dia as c_uint);
- self.call(v, inputs, [])
+ self.call(v, inputs, None)
}
}
// Emits an LLVM `call` instruction invoking `llfn` with `args`, records it
// in the instruction-count stats via count_insn, and returns the resulting
// call instruction's ValueRef.
pub fn call(&self, llfn: ValueRef, args: &[ValueRef],
// Changed: call-site attributes now arrive as an optional AttrBuilder
// instead of raw (index, bitmask) pairs; the AttrBuilder knows how to
// apply itself to the call site (apply_callsite below).
- attributes: &[(uint, u64)]) -> ValueRef {
+ attributes: Option<AttrBuilder>) -> ValueRef {
self.count_insn("call");
// tn() accessor replaces direct field access on CrateContext (same
// accessor migration as stats()/llcx() elsewhere in this patch).
debug!("Call {} with args ({})",
- self.ccx.tn.val_to_string(llfn),
+ self.ccx.tn().val_to_string(llfn),
args.iter()
- .map(|&v| self.ccx.tn.val_to_string(v))
+ .map(|&v| self.ccx.tn().val_to_string(v))
.collect::<Vec<String>>()
.connect(", "));
unsafe {
let v = llvm::LLVMBuildCall(self.llbuilder, llfn, args.as_ptr(),
args.len() as c_uint, noname());
// Apply attributes via the AttrBuilder when one was supplied; the old
// per-pair LLVMAddCallSiteAttribute loop is subsumed by apply_callsite.
- for &(idx, attr) in attributes.iter() {
- llvm::LLVMAddCallSiteAttribute(v, idx as c_uint, attr);
+ match attributes {
+ Some(a) => a.apply_callsite(v),
+ None => {}
}
v
}
}
pub fn call_with_conv(&self, llfn: ValueRef, args: &[ValueRef],
- conv: CallConv, attributes: &[(uint, u64)]) -> ValueRef {
+ conv: CallConv, attributes: Option<AttrBuilder>) -> ValueRef {
self.count_insn("callwithconv");
let v = self.call(llfn, args, attributes);
llvm::SetInstructionCallConv(v, conv);