use attributes;
use intrinsics::{self, Intrinsic};
use llvm::{self, TypeKind};
+use llvm_util;
use abi::{Abi, FnType, LlvmType, PassMode};
use mir::place::PlaceRef;
use mir::operand::{OperandRef, OperandValue};
use base::*;
use common::*;
+use context::CodegenCx;
use declare;
use glue;
use type_::Type;
use type_of::LayoutLlvmExt;
use rustc::ty::{self, Ty};
-use rustc::ty::layout::{HasDataLayout, LayoutOf};
+use rustc::ty::layout::LayoutOf;
use rustc::hir;
use syntax::ast;
use syntax::symbol::Symbol;
use builder::Builder;
use value::Value;
+use interfaces::{BuilderMethods, CommonMethods};
+
use rustc::session::Session;
use syntax_pos::Span;
bx: &Builder<'a, 'll, 'tcx>,
callee_ty: Ty<'tcx>,
fn_ty: &FnType<'tcx, Ty<'tcx>>,
- args: &[OperandRef<'ll, 'tcx>],
+ args: &[OperandRef<'tcx, &'ll Value>],
llresult: &'ll Value,
span: Span,
) {
- let cx = bx.cx;
+ let cx = bx.cx();
let tcx = cx.tcx;
let (def_id, substs) = match callee_ty.sty {
},
"likely" => {
let expect = cx.get_intrinsic(&("llvm.expect.i1"));
- bx.call(expect, &[args[0].immediate(), C_bool(cx, true)], None)
+ bx.call(expect, &[args[0].immediate(), cx.c_bool(true)], None)
}
"unlikely" => {
let expect = cx.get_intrinsic(&("llvm.expect.i1"));
- bx.call(expect, &[args[0].immediate(), C_bool(cx, false)], None)
+ bx.call(expect, &[args[0].immediate(), cx.c_bool(false)], None)
}
"try" => {
try_intrinsic(bx, cx,
}
"size_of" => {
let tp_ty = substs.type_at(0);
- C_usize(cx, cx.size_of(tp_ty).bytes())
+ cx.c_usize(cx.size_of(tp_ty).bytes())
}
"size_of_val" => {
let tp_ty = substs.type_at(0);
glue::size_and_align_of_dst(bx, tp_ty, Some(meta));
llsize
} else {
- C_usize(cx, cx.size_of(tp_ty).bytes())
+ cx.c_usize(cx.size_of(tp_ty).bytes())
}
}
"min_align_of" => {
let tp_ty = substs.type_at(0);
- C_usize(cx, cx.align_of(tp_ty).abi())
+ cx.c_usize(cx.align_of(tp_ty).abi())
}
"min_align_of_val" => {
let tp_ty = substs.type_at(0);
glue::size_and_align_of_dst(bx, tp_ty, Some(meta));
llalign
} else {
- C_usize(cx, cx.align_of(tp_ty).abi())
+ cx.c_usize(cx.align_of(tp_ty).abi())
}
}
"pref_align_of" => {
let tp_ty = substs.type_at(0);
- C_usize(cx, cx.align_of(tp_ty).pref())
+ cx.c_usize(cx.align_of(tp_ty).pref())
}
"type_name" => {
let tp_ty = substs.type_at(0);
let ty_name = Symbol::intern(&tp_ty.to_string()).as_str();
- C_str_slice(cx, ty_name)
+ cx.c_str_slice(ty_name)
}
"type_id" => {
- C_u64(cx, cx.tcx.type_id_hash(substs.type_at(0)))
+ cx.c_u64(cx.tcx.type_id_hash(substs.type_at(0)))
}
"init" => {
let ty = substs.type_at(0);
// If we store a zero constant, LLVM will drown in vreg allocation for large data
// structures, and the generated code will be awful. (A telltale sign of this is
// large quantities of `mov [byte ptr foo],0` in the generated code.)
- memset_intrinsic(bx, false, ty, llresult, C_u8(cx, 0), C_usize(cx, 1));
+ memset_intrinsic(
+ bx,
+ false,
+ ty,
+ llresult,
+ cx.c_u8(0),
+ cx.c_usize(1)
+ );
}
return;
}
"needs_drop" => {
let tp_ty = substs.type_at(0);
- C_bool(cx, bx.cx.type_needs_drop(tp_ty))
+ cx.c_bool(cx.type_needs_drop(tp_ty))
}
"offset" => {
let ptr = args[0].immediate();
to_immediate(bx, load, cx.layout_of(tp_ty))
},
"volatile_store" => {
- let dst = args[0].deref(bx.cx);
+ let dst = args[0].deref(bx.cx());
args[1].val.volatile_store(bx, dst);
return;
},
"unaligned_volatile_store" => {
- let dst = args[0].deref(bx.cx);
+ let dst = args[0].deref(bx.cx());
args[1].val.unaligned_volatile_store(bx, dst);
return;
},
};
bx.call(expect, &[
args[0].immediate(),
- C_i32(cx, rw),
+ cx.c_i32(rw),
args[1].immediate(),
- C_i32(cx, cache_type)
+ cx.c_i32(cache_type)
], None)
},
"ctlz" | "ctlz_nonzero" | "cttz" | "cttz_nonzero" | "ctpop" | "bswap" |
"bitreverse" | "add_with_overflow" | "sub_with_overflow" |
"mul_with_overflow" | "overflowing_add" | "overflowing_sub" | "overflowing_mul" |
- "unchecked_div" | "unchecked_rem" | "unchecked_shl" | "unchecked_shr" | "exact_div" => {
+ "unchecked_div" | "unchecked_rem" | "unchecked_shl" | "unchecked_shr" | "exact_div" |
+ "rotate_left" | "rotate_right" => {
let ty = arg_tys[0];
match int_type_width_signed(ty, cx) {
Some((width, signed)) =>
match name {
"ctlz" | "cttz" => {
- let y = C_bool(bx.cx, false);
+ let y = cx.c_bool(false);
let llfn = cx.get_intrinsic(&format!("llvm.{}.i{}", name, width));
bx.call(llfn, &[args[0].immediate(), y], None)
}
"ctlz_nonzero" | "cttz_nonzero" => {
- let y = C_bool(bx.cx, true);
+ let y = cx.c_bool(true);
let llvm_name = &format!("llvm.{}.i{}", &name[..4], width);
let llfn = cx.get_intrinsic(llvm_name);
bx.call(llfn, &[args[0].immediate(), y], None)
let intrinsic = format!("llvm.{}{}.with.overflow.i{}",
if signed { 's' } else { 'u' },
&name[..3], width);
- let llfn = bx.cx.get_intrinsic(&intrinsic);
+ let llfn = cx.get_intrinsic(&intrinsic);
// Convert `i1` to a `bool`, and write it to the out parameter
let pair = bx.call(llfn, &[
} else {
bx.lshr(args[0].immediate(), args[1].immediate())
},
+ "rotate_left" | "rotate_right" => {
+ let is_left = name == "rotate_left";
+ let val = args[0].immediate();
+ let raw_shift = args[1].immediate();
+ if llvm_util::get_major_version() >= 7 {
+ // rotate = funnel shift with first two args the same
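+ // (fshl/fshr shift a double-width value formed by concatenating
+ // the first two operands, so passing the same value twice is a rotate)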
+ let llvm_name = &format!("llvm.fsh{}.i{}",
+ if is_left { 'l' } else { 'r' }, width);
+ let llfn = cx.get_intrinsic(llvm_name);
+ bx.call(llfn, &[val, val, raw_shift], None)
+ } else {
+ // rotate_left: (X << (S % BW)) | (X >> ((BW - S) % BW))
+ // rotate_right: (X << ((BW - S) % BW)) | (X >> (S % BW))
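+ // The `% BW` keeps both shift amounts in range: an LLVM shift by
+ // an amount >= the bit width would produce poison.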
+ let width = cx.c_uint(Type::ix(cx, width), width);
+ let shift = bx.urem(raw_shift, width);
+ let inv_shift = bx.urem(bx.sub(width, raw_shift), width);
+ let shift1 = bx.shl(val, if is_left { shift } else { inv_shift });
+ let shift2 = bx.lshr(val, if !is_left { shift } else { inv_shift });
+ bx.or(shift1, shift2)
+ }
+ },
_ => bug!(),
},
None => {
},
"discriminant_value" => {
- args[0].deref(bx.cx).codegen_get_discr(bx, ret_ty)
+ args[0].deref(bx.cx()).codegen_get_discr(bx, ret_ty)
}
name if name.starts_with("simd_") => {
// This requires that atomic intrinsics follow a specific naming pattern:
// "atomic_<operation>[_<ordering>]", and no ordering means SeqCst
name if name.starts_with("atomic_") => {
- use llvm::AtomicOrdering::*;
+ use self::AtomicOrdering::*;
let split: Vec<&str> = name.split('_').collect();
"cxchg" | "cxchgweak" => {
let ty = substs.type_at(0);
if int_type_width_signed(ty, cx).is_some() {
- let weak = if split[1] == "cxchgweak" { llvm::True } else { llvm::False };
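+ // The builder trait takes a plain `bool` for the weak flag; the
+ // conversion to `llvm::True`/`llvm::False` now happens in the backend.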
+ let weak = split[1] == "cxchgweak";
let pair = bx.atomic_cmpxchg(
args[0].immediate(),
args[1].immediate(),
failorder,
weak);
let val = bx.extract_value(pair, 0);
- let success = bx.zext(bx.extract_value(pair, 1), Type::bool(bx.cx));
+ let success = bx.zext(bx.extract_value(pair, 1), Type::bool(cx));
let dest = result.project_field(bx, 0);
bx.store(val, dest.llval, dest.align);
"load" => {
let ty = substs.type_at(0);
if int_type_width_signed(ty, cx).is_some() {
- let align = cx.align_of(ty);
- bx.atomic_load(args[0].immediate(), order, align)
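+ // `atomic_load` now takes the access size instead of an alignment;
+ // the backend can derive the required alignment from the type's size.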
+ let size = cx.size_of(ty);
+ bx.atomic_load(args[0].immediate(), order, size)
} else {
return invalid_monomorphization(ty);
}
"store" => {
let ty = substs.type_at(0);
if int_type_width_signed(ty, cx).is_some() {
- let align = cx.align_of(ty);
- bx.atomic_store(args[1].immediate(), args[0].immediate(), order, align);
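+ // As with `atomic_load` above, pass the access size rather than
+ // an alignment.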
+ let size = cx.size_of(ty);
+ bx.atomic_store(args[1].immediate(), args[0].immediate(), order, size);
return;
} else {
return invalid_monomorphization(ty);
}
"fence" => {
- bx.atomic_fence(order, llvm::SynchronizationScope::CrossThread);
+ bx.atomic_fence(order, SynchronizationScope::CrossThread);
return;
}
"singlethreadfence" => {
- bx.atomic_fence(order, llvm::SynchronizationScope::SingleThread);
+ bx.atomic_fence(order, SynchronizationScope::SingleThread);
return;
}
// These are all AtomicRMW ops
op => {
let atom_op = match op {
- "xchg" => llvm::AtomicXchg,
- "xadd" => llvm::AtomicAdd,
- "xsub" => llvm::AtomicSub,
- "and" => llvm::AtomicAnd,
- "nand" => llvm::AtomicNand,
- "or" => llvm::AtomicOr,
- "xor" => llvm::AtomicXor,
- "max" => llvm::AtomicMax,
- "min" => llvm::AtomicMin,
- "umax" => llvm::AtomicUMax,
- "umin" => llvm::AtomicUMin,
+ "xchg" => AtomicRmwBinOp::AtomicXchg,
+ "xadd" => AtomicRmwBinOp::AtomicAdd,
+ "xsub" => AtomicRmwBinOp::AtomicSub,
+ "and" => AtomicRmwBinOp::AtomicAnd,
+ "nand" => AtomicRmwBinOp::AtomicNand,
+ "or" => AtomicRmwBinOp::AtomicOr,
+ "xor" => AtomicRmwBinOp::AtomicXor,
+ "max" => AtomicRmwBinOp::AtomicMax,
+ "min" => AtomicRmwBinOp::AtomicMin,
+ "umax" => AtomicRmwBinOp::AtomicUMax,
+ "umin" => AtomicRmwBinOp::AtomicUMin,
_ => cx.sess().fatal("unknown atomic operation")
};
}
"nontemporal_store" => {
- let dst = args[0].deref(bx.cx);
+ let dst = args[0].deref(bx.cx());
args[1].val.nontemporal_store(bx, dst);
return;
}
fn modify_as_needed(
bx: &Builder<'a, 'll, 'tcx>,
t: &intrinsics::Type,
- arg: &OperandRef<'ll, 'tcx>,
+ arg: &OperandRef<'tcx, &'ll Value>,
) -> Vec<&'ll Value> {
match *t {
intrinsics::Type::Aggregate(true, ref contents) => {
// This assumes the type is "simple", i.e. no
// destructors, and the contents are SIMD
// etc.
- assert!(!bx.cx.type_needs_drop(arg.layout.ty));
+ assert!(!bx.cx().type_needs_drop(arg.layout.ty));
let (ptr, align) = match arg.val {
OperandValue::Ref(ptr, None, align) => (ptr, align),
_ => bug!()
}).collect()
}
intrinsics::Type::Pointer(_, Some(ref llvm_elem), _) => {
- let llvm_elem = one(ty_to_type(bx.cx, llvm_elem));
+ let llvm_elem = one(ty_to_type(bx.cx(), llvm_elem));
vec![bx.pointercast(arg.immediate(), llvm_elem.ptr_to())]
}
intrinsics::Type::Vector(_, Some(ref llvm_elem), length) => {
- let llvm_elem = one(ty_to_type(bx.cx, llvm_elem));
- vec![bx.bitcast(arg.immediate(), Type::vector(llvm_elem, length as u64))]
+ let llvm_elem = one(ty_to_type(bx.cx(), llvm_elem));
+ vec![
+ bx.bitcast(arg.immediate(),
+ Type::vector(llvm_elem, length as u64))
+ ]
}
intrinsics::Type::Integer(_, width, llvm_width) if width != llvm_width => {
// the LLVM intrinsic uses a smaller integer
// size than the C intrinsic's signature, so
// we have to trim it down here.
- vec![bx.trunc(arg.immediate(), Type::ix(bx.cx, llvm_width as u64))]
+ vec![bx.trunc(arg.immediate(), Type::ix(bx.cx(), llvm_width as u64))]
}
_ => vec![arg.immediate()],
}
src: &'ll Value,
count: &'ll Value,
) -> &'ll Value {
- let cx = bx.cx;
+ let cx = bx.cx();
let (size, align) = cx.size_and_align_of(ty);
- let size = C_usize(cx, size.bytes());
- let align = C_i32(cx, align.abi() as i32);
-
- let operation = if allow_overlap {
- "memmove"
- } else {
- "memcpy"
- };
-
- let name = format!("llvm.{}.p0i8.p0i8.i{}", operation,
- cx.data_layout().pointer_size.bits());
-
+ let size = cx.c_usize(size.bytes());
+ let align = align.abi();
let dst_ptr = bx.pointercast(dst, Type::i8p(cx));
let src_ptr = bx.pointercast(src, Type::i8p(cx));
- let llfn = cx.get_intrinsic(&name);
-
- bx.call(llfn,
- &[dst_ptr,
- src_ptr,
- bx.mul(size, count),
- align,
- C_bool(cx, volatile)],
- None)
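+ // The builder's `memcpy`/`memmove` methods now construct the
+ // `llvm.mem{cpy,move}.p0i8.p0i8.iN` call themselves, taking the
+ // destination and source alignments plus the volatile flag directly.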
+ if allow_overlap {
+ bx.memmove(dst_ptr, align, src_ptr, align, bx.mul(size, count), volatile)
+ } else {
+ bx.memcpy(dst_ptr, align, src_ptr, align, bx.mul(size, count), volatile)
+ }
}
fn memset_intrinsic(
val: &'ll Value,
count: &'ll Value
) -> &'ll Value {
- let cx = bx.cx;
+ let cx = bx.cx();
let (size, align) = cx.size_and_align_of(ty);
- let size = C_usize(cx, size.bytes());
- let align = C_i32(cx, align.abi() as i32);
+ let size = cx.c_usize(size.bytes());
+ let align = cx.c_i32(align.abi() as i32);
let dst = bx.pointercast(dst, Type::i8p(cx));
call_memset(bx, dst, val, bx.mul(size, count), align, volatile)
}
if bx.sess().no_landing_pads() {
bx.call(func, &[data], None);
let ptr_align = bx.tcx().data_layout.pointer_align;
- bx.store(C_null(Type::i8p(&bx.cx)), dest, ptr_align);
+ bx.store(bx.cx().c_null(Type::i8p(bx.cx())), dest, ptr_align);
} else if wants_msvc_seh(bx.sess()) {
codegen_msvc_try(bx, cx, func, data, local_ptr, dest);
} else {
dest: &'ll Value,
) {
let llfn = get_rust_try_fn(cx, &mut |bx| {
- let cx = bx.cx;
+ let cx = bx.cx();
- bx.set_personality_fn(bx.cx.eh_personality());
+ bx.set_personality_fn(cx.eh_personality());
let normal = bx.build_sibling_block("normal");
let catchswitch = bx.build_sibling_block("catchswitch");
let slot = bx.alloca(i64p, "slot", ptr_align);
bx.invoke(func, &[data], normal.llbb(), catchswitch.llbb(), None);
- normal.ret(C_i32(cx, 0));
+ normal.ret(cx.c_i32(0));
let cs = catchswitch.catch_switch(None, None, 1);
catchswitch.add_handler(cs, catchpad.llbb());
Some(did) => ::consts::get_static(cx, did),
None => bug!("msvc_try_filter not defined"),
};
- let tok = catchpad.catch_pad(cs, &[tydesc, C_i32(cx, 0), slot]);
+ let tok = catchpad.catch_pad(cs, &[tydesc, cx.c_i32(0), slot]);
let addr = catchpad.load(slot, ptr_align);
let i64_align = bx.tcx().data_layout.i64_align;
let arg1 = catchpad.load(addr, i64_align);
- let val1 = C_i32(cx, 1);
+ let val1 = cx.c_i32(1);
let arg2 = catchpad.load(catchpad.inbounds_gep(addr, &[val1]), i64_align);
let local_ptr = catchpad.bitcast(local_ptr, i64p);
catchpad.store(arg1, local_ptr, i64_align);
catchpad.store(arg2, catchpad.inbounds_gep(local_ptr, &[val1]), i64_align);
catchpad.catch_ret(tok, caught.llbb());
- caught.ret(C_i32(cx, 1));
+ caught.ret(cx.c_i32(1));
});
// Note that no invoke is used here because by definition this function
dest: &'ll Value,
) {
let llfn = get_rust_try_fn(cx, &mut |bx| {
- let cx = bx.cx;
+ let cx = bx.cx();
// Codegens the shims described above:
//
let data = llvm::get_param(bx.llfn(), 1);
let local_ptr = llvm::get_param(bx.llfn(), 2);
bx.invoke(func, &[data], then.llbb(), catch.llbb(), None);
- then.ret(C_i32(cx, 0));
+ then.ret(cx.c_i32(0));
// Type indicator for the exception being thrown.
//
// the landing pad clauses the exception's type had been matched to.
// rust_try ignores the selector.
let lpad_ty = Type::struct_(cx, &[Type::i8p(cx), Type::i32(cx)], false);
- let vals = catch.landing_pad(lpad_ty, bx.cx.eh_personality(), 1);
- catch.add_clause(vals, C_null(Type::i8p(cx)));
+ let vals = catch.landing_pad(lpad_ty, cx.eh_personality(), 1);
+ catch.add_clause(vals, cx.c_null(Type::i8p(cx)));
let ptr = catch.extract_value(vals, 0);
let ptr_align = bx.tcx().data_layout.pointer_align;
catch.store(ptr, catch.bitcast(local_ptr, Type::i8p(cx).ptr_to()), ptr_align);
- catch.ret(C_i32(cx, 1));
+ catch.ret(cx.c_i32(1));
});
// Note that no invoke is used here because by definition this function
bx: &Builder<'a, 'll, 'tcx>,
name: &str,
callee_ty: Ty<'tcx>,
- args: &[OperandRef<'ll, 'tcx>],
+ args: &[OperandRef<'tcx, &'ll Value>],
ret_ty: Ty<'tcx>,
llret_ty: &'ll Type,
span: Span
let indices: Option<Vec<_>> = (0..n)
.map(|i| {
let arg_idx = i;
- let val = const_get_elt(vector, i as u64);
- match const_to_opt_u128(val, true) {
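+ // Shuffle indices must be compile-time constants; they are read
+ // out of the index vector via the context's constant accessors.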
+ let val = bx.cx().const_get_elt(vector, i as u64);
+ match bx.cx().const_to_opt_u128(val, true) {
None => {
emit_error!("shuffle index #{} is not a constant", arg_idx);
None
arg_idx, total_len);
None
}
- Some(idx) => Some(C_i32(bx.cx, idx as i32)),
+ Some(idx) => Some(bx.cx().c_i32(idx as i32)),
}
})
.collect();
let indices = match indices {
Some(i) => i,
- None => return Ok(C_null(llret_ty))
+ None => return Ok(bx.cx().c_null(llret_ty))
};
return Ok(bx.shuffle_vector(args[0].immediate(),
args[1].immediate(),
- C_vector(&indices)))
+ bx.cx().c_vector(&indices)))
}
if name == "simd_insert" {
_ => return_error!("mask element type is `{}`, expected `i_`", m_elem_ty)
}
// truncate the mask to a vector of i1s
- let i1 = Type::i1(bx.cx);
+ let i1 = Type::i1(bx.cx());
let i1xn = Type::vector(i1, m_len as u64);
let m_i1s = bx.trunc(args[0].immediate(), i1xn);
return Ok(bx.select(m_i1s, args[1].immediate(), args[2].immediate()));
in_len: usize,
bx: &Builder<'a, 'll, 'tcx>,
span: Span,
- args: &[OperandRef<'ll, 'tcx>],
+ args: &[OperandRef<'tcx, &'ll Value>],
) -> Result<&'ll Value, ()> {
macro_rules! emit_error {
($msg: tt) => {
};
let llvm_name = &format!("llvm.{0}.v{1}{2}", name, in_len, ety);
- let intrinsic = bx.cx.get_intrinsic(&llvm_name);
+ let intrinsic = bx.cx().get_intrinsic(&llvm_name);
let c = bx.call(intrinsic,
&args.iter().map(|arg| arg.immediate()).collect::<Vec<_>>(),
None);
}
// Alignment of T, must be a constant integer value:
- let alignment_ty = Type::i32(bx.cx);
- let alignment = C_i32(bx.cx, bx.cx.align_of(in_elem).abi() as i32);
+ let alignment_ty = Type::i32(bx.cx());
+ let alignment = bx.cx().c_i32(bx.cx().align_of(in_elem).abi() as i32);
// Truncate the mask vector to a vector of i1s:
let (mask, mask_ty) = {
- let i1 = Type::i1(bx.cx);
+ let i1 = Type::i1(bx.cx());
let i1xn = Type::vector(i1, in_len as u64);
(bx.trunc(args[2].immediate(), i1xn), i1xn)
};
// Type of the vector of pointers:
- let llvm_pointer_vec_ty = llvm_vector_ty(bx.cx, underlying_ty, in_len, pointer_count);
+ let llvm_pointer_vec_ty = llvm_vector_ty(bx.cx(), underlying_ty, in_len, pointer_count);
let llvm_pointer_vec_str = llvm_vector_str(underlying_ty, in_len, pointer_count);
// Type of the vector of elements:
- let llvm_elem_vec_ty = llvm_vector_ty(bx.cx, underlying_ty, in_len, pointer_count - 1);
+ let llvm_elem_vec_ty = llvm_vector_ty(bx.cx(), underlying_ty, in_len, pointer_count - 1);
let llvm_elem_vec_str = llvm_vector_str(underlying_ty, in_len, pointer_count - 1);
let llvm_intrinsic = format!("llvm.masked.gather.{}.{}",
llvm_elem_vec_str, llvm_pointer_vec_str);
- let f = declare::declare_cfn(bx.cx, &llvm_intrinsic,
- Type::func(&[llvm_pointer_vec_ty, alignment_ty, mask_ty,
- llvm_elem_vec_ty], llvm_elem_vec_ty));
+ let f = declare::declare_cfn(bx.cx(), &llvm_intrinsic,
+ Type::func(&[
+ llvm_pointer_vec_ty,
+ alignment_ty,
+ mask_ty,
+ llvm_elem_vec_ty], llvm_elem_vec_ty));
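+ // `llvm.masked.gather` takes (pointers, alignment, mask, passthrough)
+ // and returns the loaded vector; in the call below `args[1]` is the
+ // pointer vector and `args[0]` supplies the passthrough values.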
llvm::SetUnnamedAddr(f, false);
let v = bx.call(f, &[args[1].immediate(), alignment, mask, args[0].immediate()],
None);
}
// Alignment of T, must be a constant integer value:
- let alignment_ty = Type::i32(bx.cx);
- let alignment = C_i32(bx.cx, bx.cx.align_of(in_elem).abi() as i32);
+ let alignment_ty = Type::i32(bx.cx());
+ let alignment = bx.cx().c_i32(bx.cx().align_of(in_elem).abi() as i32);
// Truncate the mask vector to a vector of i1s:
let (mask, mask_ty) = {
- let i1 = Type::i1(bx.cx);
+ let i1 = Type::i1(bx.cx());
let i1xn = Type::vector(i1, in_len as u64);
(bx.trunc(args[2].immediate(), i1xn), i1xn)
};
- let ret_t = Type::void(bx.cx);
+ let ret_t = Type::void(bx.cx());
// Type of the vector of pointers:
- let llvm_pointer_vec_ty = llvm_vector_ty(bx.cx, underlying_ty, in_len, pointer_count);
+ let llvm_pointer_vec_ty = llvm_vector_ty(bx.cx(), underlying_ty, in_len, pointer_count);
let llvm_pointer_vec_str = llvm_vector_str(underlying_ty, in_len, pointer_count);
// Type of the vector of elements:
- let llvm_elem_vec_ty = llvm_vector_ty(bx.cx, underlying_ty, in_len, pointer_count - 1);
+ let llvm_elem_vec_ty = llvm_vector_ty(bx.cx(), underlying_ty, in_len, pointer_count - 1);
let llvm_elem_vec_str = llvm_vector_str(underlying_ty, in_len, pointer_count - 1);
let llvm_intrinsic = format!("llvm.masked.scatter.{}.{}",
llvm_elem_vec_str, llvm_pointer_vec_str);
- let f = declare::declare_cfn(bx.cx, &llvm_intrinsic,
+ let f = declare::declare_cfn(bx.cx(), &llvm_intrinsic,
Type::func(&[llvm_elem_vec_ty,
llvm_pointer_vec_ty,
alignment_ty,
// code is generated
// * if the accumulator of the fmul isn't 1, incorrect
// code is generated
- match const_get_real(acc) {
+ match bx.cx().const_get_real(acc) {
None => return_error!("accumulator of {} is not a constant", $name),
Some((v, loses_info)) => {
if $name.contains("mul") && v != 1.0_f64 {
} else {
// unordered arithmetic reductions do not:
match f.bit_width() {
- 32 => C_undef(Type::f32(bx.cx)),
- 64 => C_undef(Type::f64(bx.cx)),
+ 32 => bx.cx().c_undef(Type::f32(bx.cx())),
+ 64 => bx.cx().c_undef(Type::f64(bx.cx())),
v => {
return_error!(r#"
unsupported {} from `{}` with element `{}` of size `{}` to `{}`"#,
}
// boolean reductions operate on vectors of i1s:
- let i1 = Type::i1(bx.cx);
+ let i1 = Type::i1(bx.cx());
let i1xn = Type::vector(i1, in_len as u64);
bx.trunc(args[0].immediate(), i1xn)
};
if !$boolean {
r
} else {
- bx.zext(r, Type::bool(bx.cx))
+ bx.zext(r, Type::bool(bx.cx()))
}
)
},