self.layout.align,
bx.pointercast(llscratch, cx.i8p()),
scratch_align,
- cx.c_usize(self.layout.size.bytes()),
+ cx.const_usize(self.layout.size.bytes()),
MemFlags::empty());
bx.lifetime_end(llscratch, scratch_size);
let kind = llvm::LLVMGetMDKindIDInContext(bx.cx().llcx,
key.as_ptr() as *const c_char, key.len() as c_uint);
- let val: &'ll Value = bx.cx().c_i32(ia.ctxt.outer().as_u32() as i32);
+ let val: &'ll Value = bx.cx().const_i32(ia.ctxt.outer().as_u32() as i32);
llvm::LLVMSetMetadata(r, kind,
llvm::LLVMMDNodeInContext(bx.cx().llcx, &val, 1));
common::val_ty(v)
}
- fn c_bytes_in_context(&self, llcx: &'ll llvm::Context, bytes: &[u8]) -> &'ll Value {
- common::c_bytes_in_context(llcx, bytes)
+ fn const_bytes_in_context(&self, llcx: &'ll llvm::Context, bytes: &[u8]) -> &'ll Value {
+ common::const_bytes_in_context(llcx, bytes)
}
- fn c_struct_in_context(
+ fn const_struct_in_context(
&self,
llcx: &'a llvm::Context,
elts: &[&'a Value],
packed: bool,
) -> &'a Value {
- common::c_struct_in_context(llcx, elts, packed)
+ common::const_struct_in_context(llcx, elts, packed)
}
}
llcx: &llvm::Context,
llmod: &llvm::Module,
bitcode: Option<&[u8]>) {
- let llconst = cgcx.c_bytes_in_context(llcx, bitcode.unwrap_or(&[]));
+ let llconst = cgcx.const_bytes_in_context(llcx, bitcode.unwrap_or(&[]));
let llglobal = llvm::LLVMAddGlobal(
llmod,
cgcx.val_ty(llconst),
llvm::LLVMRustSetLinkage(llglobal, llvm::Linkage::PrivateLinkage);
llvm::LLVMSetGlobalConstant(llglobal, llvm::True);
- let llconst = cgcx.c_bytes_in_context(llcx, &[]);
+ let llconst = cgcx.const_bytes_in_context(llcx, &[]);
let llglobal = llvm::LLVMAddGlobal(
llmod,
cgcx.val_ty(llconst),
let (source, target) = cx.tcx.struct_lockstep_tails(source, target);
match (&source.sty, &target.sty) {
(&ty::Array(_, len), &ty::Slice(_)) => {
- cx.c_usize(len.unwrap_usize(cx.tcx))
+ cx.const_usize(len.unwrap_usize(cx.tcx))
}
(&ty::Dynamic(..), &ty::Dynamic(..)) => {
// For now, upcasts are limited to changes in marker
return;
}
- call_memcpy(bx, dst, dst_align, src, src_align, bx.cx().c_usize(size), flags);
+ call_memcpy(bx, dst, dst_align, src, src_align, bx.cx().const_usize(size), flags);
}
pub fn call_memset(
let ptr_width = &bx.cx().sess().target.target.target_pointer_width;
let intrinsic_key = format!("llvm.memset.p0i8.i{}", ptr_width);
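// The intrinsic name encodes the pointer type (p0i8) and the integer
// width of the length argument, hence the target pointer width suffix.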
let llintrinsicfn = bx.cx().get_intrinsic(&intrinsic_key);
- let volatile = bx.cx().c_bool(volatile);
+ let volatile = bx.cx().const_bool(volatile);
bx.call(llintrinsicfn, &[ptr, fill_byte, size, align, volatile], None)
}
DeflateEncoder::new(&mut compressed, Compression::fast())
.write_all(&metadata.raw_data).unwrap();
- let llmeta = llvm_module.c_bytes_in_context(metadata_llcx, &compressed);
- let llconst = llvm_module.c_struct_in_context(metadata_llcx, &[llmeta], false);
+ let llmeta = llvm_module.const_bytes_in_context(metadata_llcx, &compressed);
+ let llconst = llvm_module.const_struct_in_context(metadata_llcx, &[llmeta], false);
let name = exported_symbols::metadata_symbol_name(tcx);
let buf = CString::new(name).unwrap();
let llglobal = unsafe {
if !cx.used_statics.borrow().is_empty() {
let name = const_cstr!("llvm.used");
let section = const_cstr!("llvm.metadata");
- let array = cx.c_array(&cx.ptr_to(cx.i8()), &*cx.used_statics.borrow());
+ let array = cx.const_array(&cx.ptr_to(cx.i8()), &*cx.used_statics.borrow());
unsafe {
let g = llvm::LLVMAddGlobal(cx.llmod,
unsafe {
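// Attach `!range` metadata to the load; LLVM reads the two bounds as a
// half-open interval [start, end).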
let llty = self.cx.val_ty(load);
let v = [
- self.cx.c_uint_big(llty, range.start),
- self.cx.c_uint_big(llty, range.end)
+ self.cx.const_uint_big(llty, range.start),
+ self.cx.const_uint_big(llty, range.end)
];
llvm::LLVMSetMetadata(load, llvm::MD_range as c_uint,
// *always* point to a metadata value of the integer 1.
//
// [1]: http://llvm.org/docs/LangRef.html#store-instruction
- let one = self.cx.c_i32(1);
+ let one = self.cx.const_i32(1);
let node = llvm::LLVMMDNodeInContext(self.cx.llcx, &one, 1);
llvm::LLVMSetMetadata(store, llvm::MD_nontemporal as c_uint, node);
}
unsafe {
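// Splat `elt` across the vector: insert it into lane 0 of an undef
// vector, then shuffle with an all-zeros mask so every lane copies lane 0.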
let elt_ty = self.cx.val_ty(elt);
let undef = llvm::LLVMGetUndef(&self.cx().vector(elt_ty, num_elts as u64));
- let vec = self.insert_element(undef, elt, self.cx.c_i32(0));
+ let vec = self.insert_element(undef, elt, self.cx.const_i32(0));
let vec_i32_ty = &self.cx().vector(&self.cx().i32(), num_elts as u64);
- self.shuffle_vector(vec, undef, self.cx().c_null(vec_i32_ty))
+ self.shuffle_vector(vec, undef, self.cx().const_null(vec_i32_ty))
}
}
let lifetime_intrinsic = self.cx.get_intrinsic(intrinsic);
let ptr = self.pointercast(ptr, self.cx.i8p());
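// llvm.lifetime.start/end take the object size in bytes followed by an
// i8* pointer to the object.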
- self.call(lifetime_intrinsic, &[self.cx.c_u64(size), ptr], None);
+ self.call(lifetime_intrinsic, &[self.cx.const_u64(size), ptr], None);
}
fn call(&self, llfn: &'ll Value, args: &[&'ll Value],
impl<'ll, 'tcx: 'll> CommonMethods for CodegenCx<'ll, 'tcx> {
// LLVM constant constructors.
- fn c_null(&self, t: &'ll Type) -> &'ll Value {
+ fn const_null(&self, t: &'ll Type) -> &'ll Value {
unsafe {
llvm::LLVMConstNull(t)
}
}
- fn c_undef(&self, t: &'ll Type) -> &'ll Value {
+ fn const_undef(&self, t: &'ll Type) -> &'ll Value {
unsafe {
llvm::LLVMGetUndef(t)
}
}
- fn c_int(&self, t: &'ll Type, i: i64) -> &'ll Value {
+ fn const_int(&self, t: &'ll Type, i: i64) -> &'ll Value {
unsafe {
llvm::LLVMConstInt(t, i as u64, True)
}
}
- fn c_uint(&self, t: &'ll Type, i: u64) -> &'ll Value {
+ fn const_uint(&self, t: &'ll Type, i: u64) -> &'ll Value {
unsafe {
llvm::LLVMConstInt(t, i, False)
}
}
- fn c_uint_big(&self, t: &'ll Type, u: u128) -> &'ll Value {
+ fn const_uint_big(&self, t: &'ll Type, u: u128) -> &'ll Value {
unsafe {
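// Split the u128 into two 64-bit words, least significant first, the
// order LLVMConstIntOfArbitraryPrecision expects.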
let words = [u as u64, (u >> 64) as u64];
llvm::LLVMConstIntOfArbitraryPrecision(t, 2, words.as_ptr())
}
}
- fn c_bool(&self, val: bool) -> &'ll Value {
- &self.c_uint(&self.i1(), val as u64)
+ fn const_bool(&self, val: bool) -> &'ll Value {
+ &self.const_uint(&self.i1(), val as u64)
}
- fn c_i32(&self, i: i32) -> &'ll Value {
- &self.c_int(&self.i32(), i as i64)
+ fn const_i32(&self, i: i32) -> &'ll Value {
+ &self.const_int(&self.i32(), i as i64)
}
- fn c_u32(&self, i: u32) -> &'ll Value {
- &self.c_uint(&self.i32(), i as u64)
+ fn const_u32(&self, i: u32) -> &'ll Value {
+ &self.const_uint(&self.i32(), i as u64)
}
- fn c_u64(&self, i: u64) -> &'ll Value {
- &self.c_uint(&self.i64(), i)
+ fn const_u64(&self, i: u64) -> &'ll Value {
+ &self.const_uint(&self.i64(), i)
}
- fn c_usize(&self, i: u64) -> &'ll Value {
+ fn const_usize(&self, i: u64) -> &'ll Value {
let bit_size = self.data_layout().pointer_size.bits();
if bit_size < 64 {
// make sure it doesn't overflow
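// (e.g. on a 32-bit target, any value >= 1 << 32 would be truncated)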
assert!(i < (1<<bit_size));
}
- &self.c_uint(&self.isize_ty, i)
+ &self.const_uint(&self.isize_ty, i)
}
- fn c_u8(&self, i: u8) -> &'ll Value {
- &self.c_uint(&self.i8(), i as u64)
+ fn const_u8(&self, i: u8) -> &'ll Value {
+ &self.const_uint(&self.i8(), i as u64)
}
// This is a 'c-like' raw string, which differs from
// our boxed-and-length-annotated strings.
- fn c_cstr(
+ fn const_cstr(
&self,
s: LocalInternedString,
null_terminated: bool,
// NB: Do not use `do_spill_noroot` to make this into a constant string, or
// you will be kicked off fast isel. See issue #4352 for an example of this.
- fn c_str_slice(&self, s: LocalInternedString) -> &'ll Value {
+ fn const_str_slice(&self, s: LocalInternedString) -> &'ll Value {
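// A `&str` constant is a fat pointer: a pointer to the bytes paired
// with the length in bytes.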
let len = s.len();
- let cs = consts::ptrcast(&self.c_cstr(s, false),
+ let cs = consts::ptrcast(&self.const_cstr(s, false),
&self.ptr_to(&self.layout_of(&self.tcx.mk_str()).llvm_type(&self)));
- &self.c_fat_ptr(cs, &self.c_usize(len as u64))
+ &self.const_fat_ptr(cs, &self.const_usize(len as u64))
}
- fn c_fat_ptr(
+ fn const_fat_ptr(
&self,
ptr: &'ll Value,
meta: &'ll Value
) -> &'ll Value {
assert_eq!(abi::FAT_PTR_ADDR, 0);
assert_eq!(abi::FAT_PTR_EXTRA, 1);
- &self.c_struct(&[ptr, meta], false)
+ &self.const_struct(&[ptr, meta], false)
}
- fn c_struct(
+ fn const_struct(
&self,
elts: &[&'ll Value],
packed: bool
) -> &'ll Value {
- &self.c_struct_in_context(&self.llcx, elts, packed)
+ &self.const_struct_in_context(&self.llcx, elts, packed)
}
- fn c_array(&self, ty: &'ll Type, elts: &[&'ll Value]) -> &'ll Value {
+ fn const_array(&self, ty: &'ll Type, elts: &[&'ll Value]) -> &'ll Value {
unsafe {
return llvm::LLVMConstArray(ty, elts.as_ptr(), elts.len() as c_uint);
}
}
- fn c_vector(&self, elts: &[&'ll Value]) -> &'ll Value {
+ fn const_vector(&self, elts: &[&'ll Value]) -> &'ll Value {
unsafe {
return llvm::LLVMConstVector(elts.as_ptr(), elts.len() as c_uint);
}
}
- fn c_bytes(&self, bytes: &[u8]) -> &'ll Value {
- &self.c_bytes_in_context(&self.llcx, bytes)
+ fn const_bytes(&self, bytes: &[u8]) -> &'ll Value {
+ &self.const_bytes_in_context(&self.llcx, bytes)
}
fn const_get_elt(&self, v: &'ll Value, idx: u64) -> &'ll Value {
}
}
-pub fn c_bytes_in_context(llcx: &'ll llvm::Context, bytes: &[u8]) -> &'ll Value {
+pub fn const_bytes_in_context(llcx: &'ll llvm::Context, bytes: &[u8]) -> &'ll Value {
unsafe {
let ptr = bytes.as_ptr() as *const c_char;
return llvm::LLVMConstStringInContext(llcx, ptr, bytes.len() as c_uint, True);
}
}
-pub fn c_struct_in_context(
+pub fn const_struct_in_context(
llcx: &'a llvm::Context,
elts: &[&'a Value],
packed: bool,
val_ty(v)
}
- fn c_bytes_in_context(&self, llcx: &'ll llvm::Context, bytes: &[u8]) -> &'ll Value {
- c_bytes_in_context(llcx, bytes)
+ fn const_bytes_in_context(&self, llcx: &'ll llvm::Context, bytes: &[u8]) -> &'ll Value {
+ const_bytes_in_context(llcx, bytes)
}
- fn c_struct_in_context(
+ fn const_struct_in_context(
&self,
llcx: &'a llvm::Context,
elts: &[&'a Value],
packed: bool,
) -> &'a Value {
- c_struct_in_context(llcx, elts, packed)
+ const_struct_in_context(llcx, elts, packed)
}
}
// i8/u8 can shift by at most 7, i16/u16 by at most 15, etc.
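// e.g. for a 32-bit type the mask is 31 (0b11111); the inverted mask
// selects the high bits that must be zero for an in-range shift amount.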
let val = bx.cx().int_width(llty) - 1;
if invert {
- bx.cx.c_int(mask_llty, !val as i64)
+ bx.cx.const_int(mask_llty, !val as i64)
} else {
- bx.cx.c_uint(mask_llty, val)
+ bx.cx.const_uint(mask_llty, val)
}
},
TypeKind::Vector => {
let gdb_debug_scripts_section = get_or_insert_gdb_debug_scripts_section_global(bx.cx());
// Load just the first byte as that's all that's necessary to force
// LLVM to keep around the reference to the global.
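// The [0, 0] GEP indexes through the global's pointer to its first
// element without loading the entire section.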
- let indices = [bx.cx().c_i32(0), bx.cx().c_i32(0)];
+ let indices = [bx.cx().const_i32(0), bx.cx().const_i32(0)];
let element = bx.inbounds_gep(gdb_debug_scripts_section, &indices);
let volative_load_instruction = bx.volatile_load(element);
unsafe {
bug!("symbol `{}` is already defined", section_var_name)
});
llvm::LLVMSetSection(section_var, section_name.as_ptr() as *const _);
- llvm::LLVMSetInitializer(section_var, cx.c_bytes(section_contents));
+ llvm::LLVMSetInitializer(section_var, cx.const_bytes(section_contents));
llvm::LLVMSetGlobalConstant(section_var, llvm::True);
llvm::LLVMSetUnnamedAddr(section_var, llvm::True);
llvm::LLVMRustSetLinkage(section_var, llvm::Linkage::LinkOnceODRLinkage);
member_description.offset.bits(),
match member_description.discriminant {
None => None,
- Some(value) => Some(cx.c_u64(value)),
+ Some(value) => Some(cx.const_u64(value)),
},
member_description.flags,
member_description.type_metadata))
let (size, align) = bx.cx().size_and_align_of(t);
debug!("size_and_align_of_dst t={} info={:?} size: {:?} align: {:?}",
t, info, size, align);
- let size = bx.cx().c_usize(size.bytes());
- let align = bx.cx().c_usize(align.abi());
+ let size = bx.cx().const_usize(size.bytes());
+ let align = bx.cx().const_usize(align.abi());
return (size, align);
}
match t.sty {
// The info in this case is the length of the str, so the size is that
// times the unit size.
let (size, align) = bx.cx().size_and_align_of(unit);
- (bx.mul(info.unwrap(), bx.cx().c_usize(size.bytes())),
- bx.cx().c_usize(align.abi()))
+ (bx.mul(info.unwrap(), bx.cx().const_usize(size.bytes())),
+ bx.cx().const_usize(align.abi()))
}
_ => {
let cx = bx.cx();
let sized_align = layout.align.abi();
debug!("DST {} statically sized prefix size: {} align: {}",
t, sized_size, sized_align);
- let sized_size = cx.c_usize(sized_size);
- let sized_align = cx.c_usize(sized_align);
+ let sized_size = cx.const_usize(sized_size);
+ let sized_align = cx.const_usize(sized_align);
// Recurse to get the size of the dynamically sized field (must be
// the last field).
(Some(sized_align), Some(unsized_align)) => {
// If both alignments are constant (the sized_align should always be), then
// pick the correct alignment statically.
- cx.c_usize(std::cmp::max(sized_align, unsized_align) as u64)
+ cx.const_usize(std::cmp::max(sized_align, unsized_align) as u64)
}
_ => bx.select(bx.icmp(IntPredicate::IntUGT, sized_align, unsized_align),
sized_align,
//
// `(size + (align-1)) & -align`
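// e.g. size 5 with align 4: (5 + 3) & -4 == 8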
- let addend = bx.sub(align, bx.cx().c_usize(1));
+ let addend = bx.sub(align, bx.cx().const_usize(1));
let size = bx.and(bx.add(size, addend), bx.neg(align));
(size, align)
pub trait CommonMethods: Backend + CommonWriteMethods {
// Constant constructors
- fn c_null(&self, t: Self::Type) -> Self::Value;
- fn c_undef(&self, t: Self::Type) -> Self::Value;
- fn c_int(&self, t: Self::Type, i: i64) -> Self::Value;
- fn c_uint(&self, t: Self::Type, i: u64) -> Self::Value;
- fn c_uint_big(&self, t: Self::Type, u: u128) -> Self::Value;
- fn c_bool(&self, val: bool) -> Self::Value;
- fn c_i32(&self, i: i32) -> Self::Value;
- fn c_u32(&self, i: u32) -> Self::Value;
- fn c_u64(&self, i: u64) -> Self::Value;
- fn c_usize(&self, i: u64) -> Self::Value;
- fn c_u8(&self, i: u8) -> Self::Value;
- fn c_cstr(
+ fn const_null(&self, t: Self::Type) -> Self::Value;
+ fn const_undef(&self, t: Self::Type) -> Self::Value;
+ fn const_int(&self, t: Self::Type, i: i64) -> Self::Value;
+ fn const_uint(&self, t: Self::Type, i: u64) -> Self::Value;
+ fn const_uint_big(&self, t: Self::Type, u: u128) -> Self::Value;
+ fn const_bool(&self, val: bool) -> Self::Value;
+ fn const_i32(&self, i: i32) -> Self::Value;
+ fn const_u32(&self, i: u32) -> Self::Value;
+ fn const_u64(&self, i: u64) -> Self::Value;
+ fn const_usize(&self, i: u64) -> Self::Value;
+ fn const_u8(&self, i: u8) -> Self::Value;
+ fn const_cstr(
&self,
s: LocalInternedString,
null_terminated: bool,
) -> Self::Value;
- fn c_str_slice(&self, s: LocalInternedString) -> Self::Value;
- fn c_fat_ptr(
+ fn const_str_slice(&self, s: LocalInternedString) -> Self::Value;
+ fn const_fat_ptr(
&self,
ptr: Self::Value,
meta: Self::Value
) -> Self::Value;
- fn c_struct(
+ fn const_struct(
&self,
elts: &[Self::Value],
packed: bool
) -> Self::Value;
- fn c_array(&self, ty: Self::Type, elts: &[Self::Value]) -> Self::Value;
- fn c_vector(&self, elts: &[Self::Value]) -> Self::Value;
- fn c_bytes(&self, bytes: &[u8]) -> Self::Value;
+ fn const_array(&self, ty: Self::Type, elts: &[Self::Value]) -> Self::Value;
+ fn const_vector(&self, elts: &[Self::Value]) -> Self::Value;
+ fn const_bytes(&self, bytes: &[u8]) -> Self::Value;
fn const_get_elt(&self, v: Self::Value, idx: u64) -> Self::Value;
fn const_get_real(&self, v: Self::Value) -> Option<(f64, bool)>;
pub trait CommonWriteMethods: Backend {
fn val_ty(&self, v: Self::Value) -> Self::Type;
- fn c_bytes_in_context(&self, llcx: Self::Context, bytes: &[u8]) -> Self::Value;
- fn c_struct_in_context(
+ fn const_bytes_in_context(&self, llcx: Self::Context, bytes: &[u8]) -> Self::Value;
+ fn const_struct_in_context(
&self,
llcx: Self::Context,
elts: &[Self::Value],
},
"likely" => {
let expect = cx.get_intrinsic(&("llvm.expect.i1"));
- bx.call(expect, &[args[0].immediate(), bx.cx().c_bool(true)], None)
+ bx.call(expect, &[args[0].immediate(), bx.cx().const_bool(true)], None)
}
"unlikely" => {
let expect = cx.get_intrinsic(&("llvm.expect.i1"));
- bx.call(expect, &[args[0].immediate(), bx.cx().c_bool(false)], None)
+ bx.call(expect, &[args[0].immediate(), bx.cx().const_bool(false)], None)
}
"try" => {
try_intrinsic(bx, cx,
}
"size_of" => {
let tp_ty = substs.type_at(0);
- cx.c_usize(cx.size_of(tp_ty).bytes())
+ cx.const_usize(cx.size_of(tp_ty).bytes())
}
"size_of_val" => {
let tp_ty = substs.type_at(0);
glue::size_and_align_of_dst(bx, tp_ty, Some(meta));
llsize
} else {
- cx.c_usize(cx.size_of(tp_ty).bytes())
+ cx.const_usize(cx.size_of(tp_ty).bytes())
}
}
"min_align_of" => {
let tp_ty = substs.type_at(0);
- cx.c_usize(cx.align_of(tp_ty).abi())
+ cx.const_usize(cx.align_of(tp_ty).abi())
}
"min_align_of_val" => {
let tp_ty = substs.type_at(0);
glue::size_and_align_of_dst(bx, tp_ty, Some(meta));
llalign
} else {
- cx.c_usize(cx.align_of(tp_ty).abi())
+ cx.const_usize(cx.align_of(tp_ty).abi())
}
}
"pref_align_of" => {
let tp_ty = substs.type_at(0);
- cx.c_usize(cx.align_of(tp_ty).pref())
+ cx.const_usize(cx.align_of(tp_ty).pref())
}
"type_name" => {
let tp_ty = substs.type_at(0);
let ty_name = Symbol::intern(&tp_ty.to_string()).as_str();
- cx.c_str_slice(ty_name)
+ cx.const_str_slice(ty_name)
}
"type_id" => {
- cx.c_u64(cx.tcx.type_id_hash(substs.type_at(0)))
+ cx.const_u64(cx.tcx.type_id_hash(substs.type_at(0)))
}
"init" => {
let ty = substs.type_at(0);
false,
ty,
llresult,
- cx.c_u8(0),
- cx.c_usize(1)
+ cx.const_u8(0),
+ cx.const_usize(1)
);
}
return;
"needs_drop" => {
let tp_ty = substs.type_at(0);
- cx.c_bool(bx.cx().type_needs_drop(tp_ty))
+ cx.const_bool(bx.cx().type_needs_drop(tp_ty))
}
"offset" => {
let ptr = args[0].immediate();
};
bx.call(expect, &[
args[0].immediate(),
- cx.c_i32(rw),
+ cx.const_i32(rw),
args[1].immediate(),
- cx.c_i32(cache_type)
+ cx.const_i32(cache_type)
], None)
},
"ctlz" | "ctlz_nonzero" | "cttz" | "cttz_nonzero" | "ctpop" | "bswap" |
Some((width, signed)) =>
match name {
"ctlz" | "cttz" => {
- let y = cx.c_bool(false);
+ let y = cx.const_bool(false);
let llfn = cx.get_intrinsic(&format!("llvm.{}.i{}", name, width));
bx.call(llfn, &[args[0].immediate(), y], None)
}
"ctlz_nonzero" | "cttz_nonzero" => {
- let y = cx.c_bool(true);
+ let y = cx.const_bool(true);
let llvm_name = &format!("llvm.{}.i{}", &name[..4], width);
let llfn = cx.get_intrinsic(llvm_name);
bx.call(llfn, &[args[0].immediate(), y], None)
} else {
// rotate_left: (X << (S % BW)) | (X >> ((BW - S) % BW))
// rotate_right: (X << ((BW - S) % BW)) | (X >> (S % BW))
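// e.g. rotating a u8 left by 3 becomes (x << 3) | (x >> 5); the `% BW`
// keeps both shift amounts in range, since a shift by BW is undefined.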
- let width = cx.c_uint(cx.ix(width), width);
+ let width = cx.const_uint(cx.ix(width), width);
let shift = bx.urem(raw_shift, width);
let inv_shift = bx.urem(bx.sub(width, raw_shift), width);
let shift1 = bx.shl(val, if is_left { shift } else { inv_shift });
) -> &'ll Value {
let cx = bx.cx();
let (size, align) = cx.size_and_align_of(ty);
- let size = cx.c_usize(size.bytes());
+ let size = cx.const_usize(size.bytes());
let align = align.abi();
let dst_ptr = bx.pointercast(dst, cx.i8p());
let src_ptr = bx.pointercast(src, cx.i8p());
) -> &'ll Value {
let cx = bx.cx();
let (size, align) = cx.size_and_align_of(ty);
- let size = cx.c_usize(size.bytes());
- let align = cx.c_i32(align.abi() as i32);
+ let size = cx.const_usize(size.bytes());
+ let align = cx.const_i32(align.abi() as i32);
let dst = bx.pointercast(dst, cx.i8p());
call_memset(bx, dst, val, bx.mul(size, count), align, volatile)
}
if bx.sess().no_landing_pads() {
bx.call(func, &[data], None);
let ptr_align = bx.tcx().data_layout.pointer_align;
- bx.store(cx.c_null(cx.i8p()), dest, ptr_align);
+ bx.store(cx.const_null(cx.i8p()), dest, ptr_align);
} else if wants_msvc_seh(bx.sess()) {
codegen_msvc_try(bx, cx, func, data, local_ptr, dest);
} else {
let slot = bx.alloca(i64p, "slot", ptr_align);
bx.invoke(func, &[data], normal.llbb(), catchswitch.llbb(), None);
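// The shim returns 0 when `func` completes normally and 1 when an
// exception is caught (see `caught.ret` below).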
- normal.ret(cx.c_i32(0));
+ normal.ret(cx.const_i32(0));
let cs = catchswitch.catch_switch(None, None, 1);
catchswitch.add_handler(cs, catchpad.llbb());
Some(did) => ::consts::get_static(cx, did),
None => bug!("msvc_try_filter not defined"),
};
- let tok = catchpad.catch_pad(cs, &[tydesc, cx.c_i32(0), slot]);
+ let tok = catchpad.catch_pad(cs, &[tydesc, cx.const_i32(0), slot]);
let addr = catchpad.load(slot, ptr_align);
let i64_align = bx.tcx().data_layout.i64_align;
let arg1 = catchpad.load(addr, i64_align);
- let val1 = cx.c_i32(1);
+ let val1 = cx.const_i32(1);
let arg2 = catchpad.load(catchpad.inbounds_gep(addr, &[val1]), i64_align);
let local_ptr = catchpad.bitcast(local_ptr, i64p);
catchpad.store(arg1, local_ptr, i64_align);
catchpad.store(arg2, catchpad.inbounds_gep(local_ptr, &[val1]), i64_align);
catchpad.catch_ret(tok, caught.llbb());
- caught.ret(cx.c_i32(1));
+ caught.ret(cx.const_i32(1));
});
// Note that no invoke is used here because by definition this function
let data = llvm::get_param(bx.llfn(), 1);
let local_ptr = llvm::get_param(bx.llfn(), 2);
bx.invoke(func, &[data], then.llbb(), catch.llbb(), None);
- then.ret(cx.c_i32(0));
+ then.ret(cx.const_i32(0));
// Type indicator for the exception being thrown.
//
// rust_try ignores the selector.
let lpad_ty = cx.struct_(&[cx.i8p(), cx.i32()], false);
let vals = catch.landing_pad(lpad_ty, bx.cx().eh_personality(), 1);
- catch.add_clause(vals, bx.cx().c_null(cx.i8p()));
+ catch.add_clause(vals, bx.cx().const_null(cx.i8p()));
let ptr = catch.extract_value(vals, 0);
let ptr_align = bx.tcx().data_layout.pointer_align;
catch.store(ptr, catch.bitcast(local_ptr, cx.ptr_to(cx.i8p())), ptr_align);
- catch.ret(cx.c_i32(1));
+ catch.ret(cx.const_i32(1));
});
// Note that no invoke is used here because by definition this function
arg_idx, total_len);
None
}
- Some(idx) => Some(bx.cx().c_i32(idx as i32)),
+ Some(idx) => Some(bx.cx().const_i32(idx as i32)),
}
})
.collect();
let indices = match indices {
Some(i) => i,
- None => return Ok(bx.cx().c_null(llret_ty))
+ None => return Ok(bx.cx().const_null(llret_ty))
};
return Ok(bx.shuffle_vector(args[0].immediate(),
args[1].immediate(),
- bx.cx().c_vector(&indices)))
+ bx.cx().const_vector(&indices)))
}
if name == "simd_insert" {
// Alignment of T, must be a constant integer value:
let alignment_ty = bx.cx().i32();
- let alignment = bx.cx().c_i32(bx.cx().align_of(in_elem).abi() as i32);
+ let alignment = bx.cx().const_i32(bx.cx().align_of(in_elem).abi() as i32);
// Truncate the mask vector to a vector of i1s:
let (mask, mask_ty) = {
// Alignment of T, must be a constant integer value:
let alignment_ty = bx.cx().i32();
- let alignment = bx.cx().c_i32(bx.cx().align_of(in_elem).abi() as i32);
+ let alignment = bx.cx().const_i32(bx.cx().align_of(in_elem).abi() as i32);
// Truncate the mask vector to a vector of i1s:
let (mask, mask_ty) = {
} else {
// unordered arithmetic reductions do not:
match f.bit_width() {
- 32 => bx.cx().c_undef(bx.cx().f32()),
- 64 => bx.cx().c_undef(bx.cx().f64()),
+ 32 => bx.cx().const_undef(bx.cx().f32()),
+ 64 => bx.cx().const_undef(bx.cx().f64()),
v => {
return_error!(r#"
unsupported {} from `{}` with element `{}` of size `{}` to `{}`"#,
common::val_ty(v)
}
- fn c_bytes_in_context(&self, llcx: &'ll llvm::Context, bytes: &[u8]) -> &'ll Value {
- common::c_bytes_in_context(llcx, bytes)
+ fn const_bytes_in_context(&self, llcx: &'ll llvm::Context, bytes: &[u8]) -> &'ll Value {
+ common::const_bytes_in_context(llcx, bytes)
}
- fn c_struct_in_context(
+ fn const_struct_in_context(
&self,
llcx: &'a llvm::Context,
elts: &[&'a Value],
packed: bool,
) -> &'a Value {
- common::c_struct_in_context(llcx, elts, packed)
+ common::const_struct_in_context(llcx, elts, packed)
}
}
);
let ptr_align = bx.tcx().data_layout.pointer_align;
let ptr = bx.load(
- bx.inbounds_gep(llvtable, &[bx.cx().c_usize(self.0)]),
+ bx.inbounds_gep(llvtable, &[bx.cx().const_usize(self.0)]),
ptr_align
);
bx.nonnull_metadata(ptr);
let llvtable = bx.pointercast(llvtable, bx.cx().ptr_to(bx.cx().isize()));
let usize_align = bx.tcx().data_layout.pointer_align;
let ptr = bx.load(
- bx.inbounds_gep(llvtable, &[bx.cx().c_usize(self.0)]),
+ bx.inbounds_gep(llvtable, &[bx.cx().const_usize(self.0)]),
usize_align
);
// Vtable loads are invariant
}
// Not in the cache. Build it.
- let nullptr = cx.c_null(cx.i8p());
+ let nullptr = cx.const_null(cx.i8p());
let methods = tcx.vtable_methods(trait_ref.with_self_ty(tcx, ty));
let methods = methods.iter().cloned().map(|opt_mth| {
// /////////////////////////////////////////////////////////////////////////////////////////////
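// Vtable layout: [drop_in_place fn, size, align, method pointers...];
// entries without an instance are filled with the null pointer above.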
let components: Vec<_> = [
callee::get_fn(cx, monomorphize::resolve_drop_in_place(cx.tcx, ty)),
- cx.c_usize(size.bytes()),
- cx.c_usize(align.abi())
+ cx.const_usize(size.bytes()),
+ cx.const_usize(align.abi())
].iter().cloned().chain(methods).collect();
- let vtable_const = cx.c_struct(&components, false);
+ let vtable_const = cx.const_struct(&components, false);
let align = cx.data_layout().pointer_align;
let vtable = consts::addr_of(cx, vtable_const, align, Some("vtable"));
slot.storage_dead(&bx);
if !bx.sess().target.target.options.custom_unwind_resume {
- let mut lp = bx.cx().c_undef(self.landing_pad_type());
+ let mut lp = bx.cx().const_undef(self.landing_pad_type());
lp = bx.insert_value(lp, lp0, 0);
lp = bx.insert_value(lp, lp1, 1);
bx.resume(lp);
}
} else {
let switch_llty = bx.cx().layout_of(switch_ty).immediate_llvm_type(bx.cx());
- let llval = bx.cx().c_uint_big(switch_llty, values[0]);
+ let llval = bx.cx().const_uint_big(switch_llty, values[0]);
let cmp = bx.icmp(IntPredicate::IntEQ, discr.immediate(), llval);
bx.cond_br(cmp, lltrue, llfalse);
}
values.len());
let switch_llty = bx.cx().layout_of(switch_ty).immediate_llvm_type(bx.cx());
for (&value, target) in values.iter().zip(targets) {
- let llval =bx.cx().c_uint_big(switch_llty, value);
+ let llval = bx.cx().const_uint_big(switch_llty, value);
let llbb = llblock(self, *target);
bx.add_case(switch, llval, llbb)
}
// Pass the condition through llvm.expect for branch hinting.
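// `llvm.expect.i1(cond, expected)` returns `cond` unchanged while
// telling the optimizer which value to expect.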
let expect = bx.cx().get_intrinsic(&"llvm.expect.i1");
- let cond = bx.call(expect, &[cond, bx.cx().c_bool(expected)], None);
+ let cond = bx.call(expect, &[cond, bx.cx().const_bool(expected)], None);
// Create the failure block and the conditional branch to it.
let lltarget = llblock(self, target);
// Get the location information.
let loc = bx.sess().source_map().lookup_char_pos(span.lo());
let filename = Symbol::intern(&loc.file.name.to_string()).as_str();
- let filename = bx.cx().c_str_slice(filename);
- let line = bx.cx().c_u32(loc.line as u32);
- let col = bx.cx().c_u32(loc.col.to_usize() as u32 + 1);
+ let filename = bx.cx().const_str_slice(filename);
+ let line = bx.cx().const_u32(loc.line as u32);
+ let col = bx.cx().const_u32(loc.col.to_usize() as u32 + 1);
let align = tcx.data_layout.aggregate_align
.max(tcx.data_layout.i32_align)
.max(tcx.data_layout.pointer_align);
let len = self.codegen_operand(&mut bx, len).immediate();
let index = self.codegen_operand(&mut bx, index).immediate();
- let file_line_col = bx.cx().c_struct(&[filename, line, col], false);
+ let file_line_col = bx.cx().const_struct(&[filename, line, col], false);
let file_line_col = consts::addr_of(bx.cx(),
file_line_col,
align,
_ => {
let str = msg.description();
let msg_str = Symbol::intern(str).as_str();
- let msg_str = bx.cx().c_str_slice(msg_str);
- let msg_file_line_col = bx.cx().c_struct(
+ let msg_str = bx.cx().const_str_slice(msg_str);
+ let msg_file_line_col = bx.cx().const_struct(
&[msg_str, filename, line, col],
false
);
{
let loc = bx.sess().source_map().lookup_char_pos(span.lo());
let filename = Symbol::intern(&loc.file.name.to_string()).as_str();
- let filename = bx.cx.c_str_slice(filename);
- let line = bx.cx.c_u32(loc.line as u32);
- let col = bx.cx.c_u32(loc.col.to_usize() as u32 + 1);
+ let filename = bx.cx.const_str_slice(filename);
+ let line = bx.cx.const_u32(loc.line as u32);
+ let col = bx.cx.const_u32(loc.col.to_usize() as u32 + 1);
let align = tcx.data_layout.aggregate_align
.max(tcx.data_layout.i32_align)
.max(tcx.data_layout.pointer_align);
if intrinsic == Some("init") { "zeroed" } else { "uninitialized" }
);
let msg_str = Symbol::intern(&str).as_str();
- let msg_str = bx.cx.c_str_slice(msg_str);
- let msg_file_line_col = bx.cx.c_struct(
+ let msg_str = bx.cx.const_str_slice(msg_str);
+ let msg_file_line_col = bx.cx.const_struct(
&[msg_str, filename, line, col],
false,
);
let dest = match ret_dest {
_ if fn_ty.ret.is_indirect() => llargs[0],
ReturnDest::Nothing => {
- bx.cx().c_undef(bx.cx().ptr_to(fn_ty.ret.memory_ty(bx.cx())))
+ bx.cx().const_undef(bx.cx().ptr_to(fn_ty.ret.memory_ty(bx.cx())))
}
ReturnDest::IndirectOperand(dst, _) |
ReturnDest::Store(dst) => dst.llval,
arg: &ArgType<'tcx, Ty<'tcx>>) {
// Fill padding with undef value, where applicable.
if let Some(ty) = arg.pad {
- llargs.push(bx.cx().c_undef(ty.llvm_type(bx.cx())));
+ llargs.push(bx.cx().const_undef(ty.llvm_type(bx.cx())));
}
if arg.is_ignore() {
match cv {
Scalar::Bits { size: 0, .. } => {
assert_eq!(0, layout.value.size(cx).bytes());
- cx.c_undef(cx.ix(0))
+ cx.const_undef(cx.ix(0))
},
Scalar::Bits { bits, size } => {
assert_eq!(size as u64, layout.value.size(cx).bytes());
- let llval = cx.c_uint_big(cx.ix(bitsize), bits);
+ let llval = cx.const_uint_big(cx.ix(bitsize), bits);
if layout.value == layout::Pointer {
unsafe { llvm::LLVMConstIntToPtr(llval, llty) }
} else {
};
let llval = unsafe { llvm::LLVMConstInBoundsGEP(
consts::bitcast(base_addr, cx.i8p()),
- &cx.c_usize(ptr.offset.bytes()),
+ &cx.const_usize(ptr.offset.bytes()),
1,
) };
if layout.value != layout::Pointer {
assert_eq!(offset as usize as u64, offset);
let offset = offset as usize;
if offset > next_offset {
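// Emit the raw bytes lying between the previous relocation (or the
// start of the allocation) and this one.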
- llvals.push(cx.c_bytes(&alloc.bytes[next_offset..offset]));
+ llvals.push(cx.const_bytes(&alloc.bytes[next_offset..offset]));
}
let ptr_offset = read_target_uint(
dl.endian,
next_offset = offset + pointer_size;
}
if alloc.bytes.len() >= next_offset {
- llvals.push(cx.c_bytes(&alloc.bytes[next_offset ..]));
+ llvals.push(cx.const_bytes(&alloc.bytes[next_offset ..]));
}
- cx.c_struct(&llvals, true)
+ cx.const_struct(&llvals, true)
}
pub fn codegen_static_initializer(
bug!("simd shuffle field {:?}", field)
}
}).collect();
- let llval = bx.cx().c_struct(&values?, false);
+ let llval = bx.cx().const_struct(&values?, false);
Ok((llval, c.ty))
})
.unwrap_or_else(|_| {
// We've errored, so we don't have to produce working code.
let ty = self.monomorphize(&ty);
let llty = bx.cx().layout_of(ty).llvm_type(bx.cx());
- (bx.cx().c_undef(llty), ty)
+ (bx.cx().const_undef(llty), ty)
})
}
}
// C++ personality function, but `catch (...)` has no type so
// it's null. The 64 here is actually a bitfield which
// represents that this is a catch-all block.
- let null = bx.cx().c_null(bx.cx().i8p());
- let sixty_four = bx.cx().c_i32(64);
+ let null = bx.cx().const_null(bx.cx().i8p());
+ let sixty_four = bx.cx().const_i32(64);
cleanup = cp_bx.catch_pad(cs, &[null, sixty_four, null]);
cp_bx.br(llbb);
}
layout: TyLayout<'tcx>) -> OperandRef<'tcx, &'ll Value> {
assert!(layout.is_zst());
OperandRef {
- val: OperandValue::Immediate(cx.c_undef(layout.immediate_llvm_type(cx))),
+ val: OperandValue::Immediate(cx.const_undef(layout.immediate_llvm_type(cx))),
layout
}
}
debug!("Operand::immediate_or_packed_pair: packing {:?} into {:?}",
self, llty);
// Reconstruct the immediate aggregate.
- let mut llpair = bx.cx().c_undef(llty);
+ let mut llpair = bx.cx().const_undef(llty);
llpair = bx.insert_value(llpair, base::from_immediate(bx, a), 0);
llpair = bx.insert_value(llpair, base::from_immediate(bx, b), 1);
llpair
// `#[repr(simd)]` types are also immediate.
(OperandValue::Immediate(llval), &layout::Abi::Vector { .. }) => {
OperandValue::Immediate(
- bx.extract_element(llval, bx.cx().c_usize(i as u64)))
+ bx.extract_element(llval, bx.cx().const_usize(i as u64)))
}
_ => bug!("OperandRef::extract_field({:?}): not applicable", self)
// We've errored, so we don't have to produce working code.
let layout = bx.cx().layout_of(ty);
PlaceRef::new_sized(
- bx.cx().c_undef(bx.cx().ptr_to(layout.llvm_type(bx.cx()))),
+ bx.cx().const_undef(bx.cx().ptr_to(layout.llvm_type(bx.cx()))),
layout,
layout.align,
).load(bx)
let llval = unsafe { LLVMConstInBoundsGEP(
consts::bitcast(base_addr, bx.cx().i8p()),
- &bx.cx().c_usize(offset.bytes()),
+ &bx.cx().const_usize(offset.bytes()),
1,
)};
let llval = consts::bitcast(llval, bx.cx().ptr_to(layout.llvm_type(bx.cx())));
assert_eq!(count, 0);
self.llextra.unwrap()
} else {
- cx.c_usize(count)
+ cx.const_usize(count)
}
} else {
bug!("unexpected layout `{:#?}` in PlaceRef::len", self.layout)
let meta = self.llextra;
- let unaligned_offset = cx.c_usize(offset.bytes());
+ let unaligned_offset = cx.const_usize(offset.bytes());
// Get the alignment of the field
let (_, unsized_align) = glue::size_and_align_of_dst(bx, field.ty, meta);
// (unaligned offset + (align - 1)) & -align
// Calculate offset
- let align_sub_1 = bx.sub(unsized_align, cx.c_usize(1u64));
+ let align_sub_1 = bx.sub(unsized_align, cx.const_usize(1u64));
let offset = bx.and(bx.add(unaligned_offset, align_sub_1),
bx.neg(unsized_align));
) -> &'ll Value {
let cast_to = bx.cx().layout_of(cast_to).immediate_llvm_type(bx.cx());
if self.layout.abi.is_uninhabited() {
- return bx.cx().c_undef(cast_to);
+ return bx.cx().const_undef(cast_to);
}
match self.layout.variants {
layout::Variants::Single { index } => {
let discr_val = self.layout.ty.ty_adt_def().map_or(
index.as_u32() as u128,
|def| def.discriminant_for_variant(bx.cx().tcx, index).val);
- return bx.cx().c_uint_big(cast_to, discr_val);
+ return bx.cx().const_uint_big(cast_to, discr_val);
}
layout::Variants::Tagged { .. } |
layout::Variants::NicheFilling { .. } => {},
// FIXME(eddyb) Check the actual primitive type here.
let niche_llval = if niche_start == 0 {
- // HACK(eddyb) Using `c_null` as it works on all types.
+ // HACK(eddyb) Using `const_null` as it works on all types.
- bx.cx().c_null(niche_llty)
+ bx.cx().const_null(niche_llty)
} else {
- bx.cx().c_uint_big(niche_llty, niche_start)
+ bx.cx().const_uint_big(niche_llty, niche_start)
};
bx.select(bx.icmp(IntPredicate::IntEQ, lldiscr, niche_llval),
- bx.cx().c_uint(cast_to, niche_variants.start().as_u32() as u64),
- bx.cx().c_uint(cast_to, dataful_variant.as_u32() as u64))
+ bx.cx().const_uint(cast_to, niche_variants.start().as_u32() as u64),
+ bx.cx().const_uint(cast_to, dataful_variant.as_u32() as u64))
} else {
// Rebase from niche values to discriminant values.
let delta = niche_start.wrapping_sub(niche_variants.start().as_u32() as u128);
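// Subtracting `delta` maps a raw niche value onto the variant index
// range; anything above `lldiscr_max` is the dataful variant.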
- let lldiscr = bx.sub(lldiscr, bx.cx().c_uint_big(niche_llty, delta));
+ let lldiscr = bx.sub(lldiscr, bx.cx().const_uint_big(niche_llty, delta));
let lldiscr_max =
- bx.cx().c_uint(niche_llty, niche_variants.end().as_u32() as u64);
+ bx.cx().const_uint(niche_llty, niche_variants.end().as_u32() as u64);
bx.select(bx.icmp(IntPredicate::IntULE, lldiscr, lldiscr_max),
bx.intcast(lldiscr, cast_to, false),
- bx.cx().c_uint(cast_to, dataful_variant.as_u32() as u64))
+ bx.cx().const_uint(cast_to, dataful_variant.as_u32() as u64))
}
}
}
.discriminant_for_variant(bx.tcx(), variant_index)
.val;
bx.store(
- bx.cx().c_uint_big(ptr.layout.llvm_type(bx.cx()), to),
+ bx.cx().const_uint_big(ptr.layout.llvm_type(bx.cx()), to),
ptr.llval,
ptr.align);
}
// Issue #34427: as a workaround for an LLVM bug on ARM,
// use memset of 0 before assigning niche value.
let llptr = bx.pointercast(self.llval, bx.cx().ptr_to(bx.cx().i8()));
- let fill_byte = bx.cx().c_u8(0);
+ let fill_byte = bx.cx().const_u8(0);
let (size, align) = self.layout.size_and_align();
- let size = bx.cx().c_usize(size.bytes());
- let align = bx.cx().c_u32(align.abi() as u32);
+ let size = bx.cx().const_usize(size.bytes());
+ let align = bx.cx().const_u32(align.abi() as u32);
base::call_memset(bx, llptr, fill_byte, size, align, false);
}
// FIXME(eddyb) Check the actual primitive type here.
let niche_llval = if niche_value == 0 {
- // HACK(eddyb) Using `c_null` as it works on all types.
+ // HACK(eddyb) Using `const_null` as it works on all types.
- bx.cx().c_null(niche_llty)
+ bx.cx().const_null(niche_llty)
} else {
- bx.cx().c_uint_big(niche_llty, niche_value)
+ bx.cx().const_uint_big(niche_llty, niche_value)
};
OperandValue::Immediate(niche_llval).store(bx, niche);
}
pub fn project_index(&self, bx: &Builder<'a, 'll, 'tcx>, llindex: &'ll Value)
-> PlaceRef<'tcx, &'ll Value> {
PlaceRef {
- llval: bx.inbounds_gep(self.llval, &[bx.cx().c_usize(0), llindex]),
+ llval: bx.inbounds_gep(self.llval, &[bx.cx().const_usize(0), llindex]),
llextra: None,
layout: self.layout.field(bx.cx(), 0),
align: self.align
// so we generate an abort
let fnname = bx.cx().get_intrinsic(&("llvm.trap"));
bx.call(fnname, &[], None);
- let llval = bx.cx().c_undef(bx.cx().ptr_to(layout.llvm_type(bx.cx())));
+ let llval = bx.cx().const_undef(bx.cx().ptr_to(layout.llvm_type(bx.cx())));
PlaceRef::new_sized(llval, layout, layout.align)
}
}
mir::ProjectionElem::ConstantIndex { offset,
from_end: false,
min_length: _ } => {
- let lloffset = bx.cx().c_usize(offset as u64);
+ let lloffset = bx.cx().const_usize(offset as u64);
cg_base.project_index(bx, lloffset)
}
mir::ProjectionElem::ConstantIndex { offset,
from_end: true,
min_length: _ } => {
- let lloffset = bx.cx().c_usize(offset as u64);
+ let lloffset = bx.cx().const_usize(offset as u64);
let lllen = cg_base.len(bx.cx());
let llindex = bx.sub(lllen, lloffset);
cg_base.project_index(bx, llindex)
}
mir::ProjectionElem::Subslice { from, to } => {
let mut subslice = cg_base.project_index(bx,
- bx.cx().c_usize(from as u64));
+ bx.cx().const_usize(from as u64));
let projected_ty = PlaceTy::Ty { ty: cg_base.layout.ty }
.projection_ty(tcx, &projection.elem)
.to_ty(bx.tcx());
if subslice.layout.is_unsized() {
subslice.llextra = Some(bx.sub(cg_base.llextra.unwrap(),
- bx.cx().c_usize((from as u64) + (to as u64))));
+ bx.cx().const_usize((from as u64) + (to as u64))));
}
// Cast the place pointer type to the new
return bx;
}
- let start = dest.project_index(&bx, bx.cx().c_usize(0)).llval;
+ let start = dest.project_index(&bx, bx.cx().const_usize(0)).llval;
if let OperandValue::Immediate(v) = cg_elem.val {
- let align = bx.cx().c_i32(dest.align.abi() as i32);
- let size = bx.cx().c_usize(dest.layout.size.bytes());
+ let align = bx.cx().const_i32(dest.align.abi() as i32);
+ let size = bx.cx().const_usize(dest.layout.size.bytes());
// Use llvm.memset.p0i8.* to initialize all zero arrays
if bx.cx().is_const_integral(v) && bx.cx().const_to_uint(v) == 0 {
- let fill = bx.cx().c_u8(0);
+ let fill = bx.cx().const_u8(0);
base::call_memset(&bx, start, fill, size, align, false);
return bx;
}
}
}
- let count = bx.cx().c_usize(count);
+ let count = bx.cx().const_usize(count);
let end = dest.project_index(&bx, count).llval;
let header_bx = bx.build_sibling_block("repeat_loop_header");
cg_elem.val.store(&body_bx,
PlaceRef::new_sized(current, cg_elem.layout, dest.align));
- let next = body_bx.inbounds_gep(current, &[bx.cx().c_usize(1)]);
+ let next = body_bx.inbounds_gep(current, &[bx.cx().const_usize(1)]);
body_bx.br(header_bx.llbb());
header_bx.add_incoming_to_phi(current, next, body_bx.llbb());
assert!(cast.is_llvm_immediate());
let ll_t_out = cast.immediate_llvm_type(bx.cx());
if operand.layout.abi.is_uninhabited() {
- let val = OperandValue::Immediate(bx.cx().c_undef(ll_t_out));
+ let val = OperandValue::Immediate(bx.cx().const_undef(ll_t_out));
return (bx, OperandRef {
val,
layout: cast,
let discr_val = def
.discriminant_for_variant(bx.cx().tcx, index)
.val;
- let discr = bx.cx().c_uint_big(ll_t_out, discr_val);
+ let discr = bx.cx().const_uint_big(ll_t_out, discr_val);
return (bx, OperandRef {
val: OperandValue::Immediate(discr),
layout: cast,
base::call_assume(&bx, bx.icmp(
IntPredicate::IntULE,
llval,
- bx.cx().c_uint_big(ll_t_in, *scalar.valid_range.end())
+ bx.cx().const_uint_big(ll_t_in, *scalar.valid_range.end())
));
}
}
mir::Rvalue::NullaryOp(mir::NullOp::SizeOf, ty) => {
assert!(bx.cx().type_is_sized(ty));
- let val = bx.cx().c_usize(bx.cx().size_of(ty).bytes());
+ let val = bx.cx().const_usize(bx.cx().size_of(ty).bytes());
let tcx = bx.tcx();
(bx, OperandRef {
val: OperandValue::Immediate(val),
mir::Rvalue::NullaryOp(mir::NullOp::Box, content_ty) => {
let content_ty: Ty<'tcx> = self.monomorphize(&content_ty);
let (size, align) = bx.cx().size_and_align_of(content_ty);
- let llsize = bx.cx().c_usize(size.bytes());
- let llalign = bx.cx().c_usize(align.abi());
+ let llsize = bx.cx().const_usize(size.bytes());
+ let llalign = bx.cx().const_usize(align.abi());
let box_layout = bx.cx().layout_of(bx.tcx().mk_box(content_ty));
let llty_ptr = box_layout.llvm_type(bx.cx());
if let LocalRef::Operand(Some(op)) = self.locals[index] {
if let ty::Array(_, n) = op.layout.ty.sty {
let n = n.unwrap_usize(bx.cx().tcx);
- return bx.cx().c_usize(n);
+ return bx.cx().const_usize(n);
}
}
}
mir::BinOp::Shr => common::build_unchecked_rshift(bx, input_ty, lhs, rhs),
mir::BinOp::Ne | mir::BinOp::Lt | mir::BinOp::Gt |
mir::BinOp::Eq | mir::BinOp::Le | mir::BinOp::Ge => if is_unit {
- bx.cx().c_bool(match op {
+ bx.cx().const_bool(match op {
mir::BinOp::Ne | mir::BinOp::Lt | mir::BinOp::Gt => false,
mir::BinOp::Eq | mir::BinOp::Le | mir::BinOp::Ge => true,
_ => unreachable!()
// while the current crate doesn't use overflow checks.
if !bx.cx().check_overflow {
let val = self.codegen_scalar_binop(bx, op, lhs, rhs, input_ty);
- return OperandValue::Pair(val, bx.cx().c_bool(false));
+ return OperandValue::Pair(val, bx.cx().const_bool(false));
}
let (val, of) = match op {
let invert_mask = common::shift_mask_val(&bx, lhs_llty, rhs_llty, true);
let outer_bits = bx.and(rhs, invert_mask);
- let of = bx.icmp(IntPredicate::IntNE, outer_bits, bx.cx().c_null(rhs_llty));
+ let of = bx.icmp(IntPredicate::IntNE, outer_bits, bx.cx().const_null(rhs_llty));
let val = self.codegen_scalar_binop(bx, op, lhs, rhs, input_ty);
(val, of)
use rustc_apfloat::Float;
const MAX_F32_PLUS_HALF_ULP: u128 = ((1 << (Single::PRECISION + 1)) - 1)
<< (Single::MAX_EXP - Single::PRECISION as i16);
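// This is f32::MAX + 0.5 ULP; any value at or above it would round to
// +infinity, so such inputs are mapped to the infinity bit pattern below.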
- let max = bx.cx().c_uint_big(int_ty, MAX_F32_PLUS_HALF_ULP);
+ let max = bx.cx().const_uint_big(int_ty, MAX_F32_PLUS_HALF_ULP);
let overflow = bx.icmp(IntPredicate::IntUGE, x, max);
- let infinity_bits = bx.cx().c_u32(ieee::Single::INFINITY.to_bits() as u32);
+ let infinity_bits = bx.cx().const_u32(ieee::Single::INFINITY.to_bits() as u32);
let infinity = consts::bitcast(infinity_bits, float_ty);
bx.select(overflow, infinity, bx.uitofp(x, float_ty))
} else {
let float_bits_to_llval = |bits| {
let bits_llval = match bx.cx().float_width(float_ty) {
- 32 => bx.cx().c_u32(bits as u32),
- 64 => bx.cx().c_u64(bits as u64),
+ 32 => bx.cx().const_u32(bits as u32),
+ 64 => bx.cx().const_u64(bits as u64),
n => bug!("unsupported float width {}", n),
};
consts::bitcast(bits_llval, float_ty)
// performed is ultimately up to the backend, but at least x86 does perform them.
let less_or_nan = bx.fcmp(RealPredicate::RealULT, x, f_min);
let greater = bx.fcmp(RealPredicate::RealOGT, x, f_max);
- let int_max = bx.cx().c_uint_big(int_ty, int_max(signed, int_ty));
- let int_min = bx.cx().c_uint_big(int_ty, int_min(signed, int_ty) as u128);
+ let int_max = bx.cx().const_uint_big(int_ty, int_max(signed, int_ty));
+ let int_min = bx.cx().const_uint_big(int_ty, int_min(signed, int_ty) as u128);
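// Clamp: below-range inputs (or NaN, via the unordered compare) pick
// int_min, above-range inputs pick int_max; everything else keeps the
// raw fptosui result.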
let s0 = bx.select(less_or_nan, int_min, fptosui_result);
let s1 = bx.select(greater, int_max, s0);
// Therefore we only need to execute this step for signed integer types.
if signed {
// LLVM has no isNaN predicate, so we use (x == x) instead
- bx.select(bx.fcmp(RealPredicate::RealOEQ, x, x), s1, bx.cx().c_uint(int_ty, 0))
+ bx.select(bx.fcmp(RealPredicate::RealOEQ, x, x), s1, bx.cx().const_uint(int_ty, 0))
} else {
s1
}