impl<'tcx, B: Backend + 'static> FunctionCx<'_, 'tcx, B> {
    /// Returns a Cranelift `FuncRef` usable for calling `inst` from the
    /// current function, importing the symbol into the module first.
    ///
    /// Instance must be monomorphized
    pub(crate) fn get_function_ref(&mut self, inst: Instance<'tcx>) -> FuncRef {
        let func_id = import_function(self.codegen_cx.tcx, self.codegen_cx.module, inst);
        let func_ref = self
            .codegen_cx.module
            .declare_func_in_func(func_id, &mut self.bcx.func);
#[cfg(debug_assertions)]
call_conv: CallConv::triple_default(self.triple()),
};
let func_id = self
- codegen_cx.module
+ .codegen_cx.module
.declare_function(&name, Linkage::Import, &sig)
.unwrap();
let func_ref = self
- codegen_cx.module
+ .codegen_cx.module
.declare_func_in_func(func_id, &mut self.bcx.func);
let call_inst = self.bcx.ins().call(func_ref, args);
#[cfg(debug_assertions)]
.collect::<Vec<(Local, ArgKind<'tcx>, Ty<'tcx>)>>();
assert!(fx.caller_location.is_none());
- if fx.instance.def.requires_caller_location(fxcodegen_cx.tcx) {
+ if fx.instance.def.requires_caller_location(fx.codegen_cx.tcx) {
// Store caller location for `#[track_caller]`.
- fx.caller_location = Some(cvalue_for_param(fx, start_block, None, None, fxcodegen_cx.tcx.caller_location_ty()).unwrap());
+ fx.caller_location = Some(cvalue_for_param(fx, start_block, None, None, fx.codegen_cx.tcx.caller_location_ty()).unwrap());
}
fx.bcx.switch_to_block(start_block);
let local_decl = &fx.mir.local_decls[local];
// v this ! is important
let internally_mutable = !val.layout().ty.is_freeze(
- fxcodegen_cx.tcx.at(local_decl.source_info.span),
+ fx.codegen_cx.tcx.at(local_decl.source_info.span),
ParamEnv::reveal_all(),
);
if local_decl.mutability == mir::Mutability::Not && !internally_mutable {
args: &[Operand<'tcx>],
destination: Option<(Place<'tcx>, BasicBlock)>,
) {
- let fn_ty = fx.monomorphize(&func.ty(fx.mir, fxcodegen_cx.tcx));
+ let fn_ty = fx.monomorphize(&func.ty(fx.mir, fx.codegen_cx.tcx));
let fn_sig = fx
- codegen_cx.tcx
- .normalize_erasing_late_bound_regions(ParamEnv::reveal_all(), &fn_ty.fn_sig(fxcodegen_cx.tcx));
+ .codegen_cx.tcx
+ .normalize_erasing_late_bound_regions(ParamEnv::reveal_all(), &fn_ty.fn_sig(fx.codegen_cx.tcx));
let destination = destination.map(|(place, bb)| (trans_place(fx, place), bb));
// Handle special calls like instrinsics and empty drop glue.
let instance = if let ty::FnDef(def_id, substs) = fn_ty.kind {
- let instance = ty::Instance::resolve(fxcodegen_cx.tcx, ty::ParamEnv::reveal_all(), def_id, substs)
+ let instance = ty::Instance::resolve(fx.codegen_cx.tcx, ty::ParamEnv::reveal_all(), def_id, substs)
.unwrap()
.unwrap()
- .polymorphize(fxcodegen_cx.tcx);
+ .polymorphize(fx.codegen_cx.tcx);
- if fxcodegen_cx.tcx.symbol_name(instance).name.starts_with("llvm.") {
+ if fx.codegen_cx.tcx.symbol_name(instance).name.starts_with("llvm.") {
crate::intrinsics::codegen_llvm_intrinsic_call(
fx,
- &fxcodegen_cx.tcx.symbol_name(instance).name,
+ &fx.codegen_cx.tcx.symbol_name(instance).name,
substs,
args,
destination,
let is_cold =
instance.map(|inst|
- fxcodegen_cx.tcx.codegen_fn_attrs(inst.def_id())
+ fx.codegen_cx.tcx.codegen_fn_attrs(inst.def_id())
.flags.contains(CodegenFnAttrFlags::COLD))
.unwrap_or(false);
if is_cold {
nop_inst,
format!(
"virtual call; self arg pass mode: {:?}",
- get_pass_mode(fxcodegen_cx.tcx, args[0].layout())
+ get_pass_mode(fx.codegen_cx.tcx, args[0].layout())
),
);
}
)
.collect::<Vec<_>>();
- if instance.map(|inst| inst.def.requires_caller_location(fxcodegen_cx.tcx)).unwrap_or(false) {
+ if instance.map(|inst| inst.def.requires_caller_location(fx.codegen_cx.tcx)).unwrap_or(false) {
// Pass the caller location for `#[track_caller]`.
let caller_location = fx.get_caller_location(span);
call_args.extend(adjust_arg_for_abi(fx, caller_location).into_iter());
let call_inst = if let Some(func_ref) = func_ref {
let sig = clif_sig_from_fn_sig(
- fxcodegen_cx.tcx,
+ fx.codegen_cx.tcx,
fx.triple(),
fn_sig,
span,
// FIXME find a cleaner way to support varargs
if fn_sig.c_variadic {
if fn_sig.abi != Abi::C {
- fxcodegen_cx.tcx.sess.span_fatal(span, &format!("Variadic call for non-C abi {:?}", fn_sig.abi));
+ fx.codegen_cx.tcx.sess.span_fatal(span, &format!("Variadic call for non-C abi {:?}", fn_sig.abi));
}
let sig_ref = fx.bcx.func.dfg.call_signature(call_inst).unwrap();
let abi_params = call_args
let ty = fx.bcx.func.dfg.value_type(arg);
if !ty.is_int() {
// FIXME set %al to upperbound on float args once floats are supported
- fxcodegen_cx.tcx.sess.span_fatal(span, &format!("Non int ty {:?} for variadic call", ty));
+ fx.codegen_cx.tcx.sess.span_fatal(span, &format!("Non int ty {:?} for variadic call", ty));
}
AbiParam::new(ty)
})
drop_place: CPlace<'tcx>,
) {
let ty = drop_place.layout().ty;
- let drop_fn = Instance::resolve_drop_in_place(fxcodegen_cx.tcx, ty).polymorphize(fxcodegen_cx.tcx);
+ let drop_fn = Instance::resolve_drop_in_place(fx.codegen_cx.tcx, ty).polymorphize(fx.codegen_cx.tcx);
if let ty::InstanceDef::DropGlue(_, None) = drop_fn.def {
// we don't actually need to drop anything
} else {
- let drop_fn_ty = drop_fn.ty(fxcodegen_cx.tcx, ParamEnv::reveal_all());
- let fn_sig = fxcodegen_cx.tcx.normalize_erasing_late_bound_regions(
+ let drop_fn_ty = drop_fn.ty(fx.codegen_cx.tcx, ParamEnv::reveal_all());
+ let fn_sig = fx.codegen_cx.tcx.normalize_erasing_late_bound_regions(
ParamEnv::reveal_all(),
- &drop_fn_ty.fn_sig(fxcodegen_cx.tcx),
+ &drop_fn_ty.fn_sig(fx.codegen_cx.tcx),
);
- assert_eq!(fn_sig.output(), fxcodegen_cx.tcx.mk_unit());
+ assert_eq!(fn_sig.output(), fx.codegen_cx.tcx.mk_unit());
match ty.kind {
ty::Dynamic(..) => {
let drop_fn = crate::vtable::drop_fn_of_obj(fx, vtable.unwrap());
let sig = clif_sig_from_fn_sig(
- fxcodegen_cx.tcx,
+ fx.codegen_cx.tcx,
fx.triple(),
fn_sig,
span,
let arg_place = CPlace::new_stack_slot(
fx,
- fx.layout_of(fxcodegen_cx.tcx.mk_ref(
+ fx.layout_of(fx.codegen_cx.tcx.mk_ref(
&ty::RegionKind::ReErased,
TypeAndMut {
ty,
let mut call_args: Vec<Value> = arg_value.into_iter().collect::<Vec<_>>();
- if drop_fn.def.requires_caller_location(fxcodegen_cx.tcx) {
+ if drop_fn.def.requires_caller_location(fx.codegen_cx.tcx) {
// Pass the caller location for `#[track_caller]`.
let caller_location = fx.get_caller_location(span);
call_args.extend(adjust_arg_for_abi(fx, caller_location).into_iter());
fx: &mut FunctionCx<'_, 'tcx, impl Backend>,
arg: CValue<'tcx>,
) -> EmptySinglePair<Value> {
- match get_pass_mode(fxcodegen_cx.tcx, arg.layout()) {
+ match get_pass_mode(fx.codegen_cx.tcx, arg.layout()) {
PassMode::NoPass => Empty,
PassMode::ByVal(_) => Single(arg.load_scalar(fx)),
PassMode::ByValPair(_, _) => {
arg_ty: Ty<'tcx>,
) -> Option<CValue<'tcx>> {
let layout = fx.layout_of(arg_ty);
- let pass_mode = get_pass_mode(fxcodegen_cx.tcx, layout);
+ let pass_mode = get_pass_mode(fx.codegen_cx.tcx, layout);
if let PassMode::NoPass = pass_mode {
return None;
}
- let clif_types = pass_mode.get_param_ty(fxcodegen_cx.tcx);
+ let clif_types = pass_mode.get_param_ty(fx.codegen_cx.tcx);
let block_params = clif_types.map(|t| fx.bcx.append_block_param(start_block, t));
#[cfg(debug_assertions)]
start_block: Block,
) {
let ret_layout = return_layout(fx);
- let ret_pass_mode = get_pass_mode(fxcodegen_cx.tcx, ret_layout);
+ let ret_pass_mode = get_pass_mode(fx.codegen_cx.tcx, ret_layout);
let ret_param = match ret_pass_mode {
PassMode::NoPass => {
fx.local_map
) -> (Inst, T) {
let ret_layout = fx.layout_of(fn_sig.output());
- let output_pass_mode = get_pass_mode(fxcodegen_cx.tcx, ret_layout);
+ let output_pass_mode = get_pass_mode(fx.codegen_cx.tcx, ret_layout);
let return_ptr = match output_pass_mode {
PassMode::NoPass => None,
PassMode::ByRef { size: Some(_)} => match ret_place {
}
pub(crate) fn codegen_return(fx: &mut FunctionCx<'_, '_, impl Backend>) {
- match get_pass_mode(fxcodegen_cx.tcx, return_layout(fx)) {
+ match get_pass_mode(fx.codegen_cx.tcx, return_layout(fx)) {
PassMode::NoPass | PassMode::ByRef { size: Some(_) } => {
fx.bcx.ins().return_(&[]);
}
match &bb.terminator().kind {
TerminatorKind::Call { destination, .. } => {
if let Some((dest_place, _dest_bb)) = destination {
- let dest_layout = fx.layout_of(fx.monomorphize(&dest_place.ty(&fx.mir.local_decls, fxcodegen_cx.tcx).ty));
- if !crate::abi::can_return_to_ssa_var(fxcodegen_cx.tcx, dest_layout) {
+ let dest_layout = fx.layout_of(fx.monomorphize(&dest_place.ty(&fx.mir.local_decls, fx.codegen_cx.tcx).ty));
+ if !crate::abi::can_return_to_ssa_var(fx.codegen_cx.tcx, dest_layout) {
not_ssa(&mut flag_map, dest_place.local)
}
}
}
pub(crate) fn lock_global_lock(fx: &mut FunctionCx<'_, '_, impl Backend>) {
- let atomic_mutex = fxcodegen_cx.module.declare_data(
+ let atomic_mutex = fx.codegen_cx.module.declare_data(
"__cg_clif_global_atomic_mutex",
Linkage::Import,
true,
None,
).unwrap();
- let pthread_mutex_lock = fxcodegen_cx.module.declare_function("pthread_mutex_lock", Linkage::Import, &cranelift_codegen::ir::Signature {
- call_conv: fxcodegen_cx.module.target_config().default_call_conv,
+ let pthread_mutex_lock = fx.codegen_cx.module.declare_function("pthread_mutex_lock", Linkage::Import, &cranelift_codegen::ir::Signature {
+ call_conv: fx.codegen_cx.module.target_config().default_call_conv,
params: vec![
- AbiParam::new(fxcodegen_cx.module.target_config().pointer_type() /* *mut pthread_mutex_t */),
+ AbiParam::new(fx.codegen_cx.module.target_config().pointer_type() /* *mut pthread_mutex_t */),
],
returns: vec![AbiParam::new(types::I32 /* c_int */)],
}).unwrap();
- let pthread_mutex_lock = fxcodegen_cx.module.declare_func_in_func(pthread_mutex_lock, fx.bcx.func);
+ let pthread_mutex_lock = fx.codegen_cx.module.declare_func_in_func(pthread_mutex_lock, fx.bcx.func);
- let atomic_mutex = fxcodegen_cx.module.declare_data_in_func(atomic_mutex, fx.bcx.func);
- let atomic_mutex = fx.bcx.ins().global_value(fxcodegen_cx.module.target_config().pointer_type(), atomic_mutex);
+ let atomic_mutex = fx.codegen_cx.module.declare_data_in_func(atomic_mutex, fx.bcx.func);
+ let atomic_mutex = fx.bcx.ins().global_value(fx.codegen_cx.module.target_config().pointer_type(), atomic_mutex);
fx.bcx.ins().call(pthread_mutex_lock, &[atomic_mutex]);
}
pub(crate) fn unlock_global_lock(fx: &mut FunctionCx<'_, '_, impl Backend>) {
- let atomic_mutex = fxcodegen_cx.module.declare_data(
+ let atomic_mutex = fx.codegen_cx.module.declare_data(
"__cg_clif_global_atomic_mutex",
Linkage::Import,
true,
None,
).unwrap();
- let pthread_mutex_unlock = fxcodegen_cx.module.declare_function("pthread_mutex_unlock", Linkage::Import, &cranelift_codegen::ir::Signature {
- call_conv: fxcodegen_cx.module.target_config().default_call_conv,
+ let pthread_mutex_unlock = fx.codegen_cx.module.declare_function("pthread_mutex_unlock", Linkage::Import, &cranelift_codegen::ir::Signature {
+ call_conv: fx.codegen_cx.module.target_config().default_call_conv,
params: vec![
- AbiParam::new(fxcodegen_cx.module.target_config().pointer_type() /* *mut pthread_mutex_t */),
+ AbiParam::new(fx.codegen_cx.module.target_config().pointer_type() /* *mut pthread_mutex_t */),
],
returns: vec![AbiParam::new(types::I32 /* c_int */)],
}).unwrap();
- let pthread_mutex_unlock = fxcodegen_cx.module.declare_func_in_func(pthread_mutex_unlock, fx.bcx.func);
+ let pthread_mutex_unlock = fx.codegen_cx.module.declare_func_in_func(pthread_mutex_unlock, fx.bcx.func);
- let atomic_mutex = fxcodegen_cx.module.declare_data_in_func(atomic_mutex, fx.bcx.func);
- let atomic_mutex = fx.bcx.ins().global_value(fxcodegen_cx.module.target_config().pointer_type(), atomic_mutex);
+ let atomic_mutex = fx.codegen_cx.module.declare_data_in_func(atomic_mutex, fx.bcx.func);
+ let atomic_mutex = fx.bcx.ins().global_value(fx.codegen_cx.module.target_config().pointer_type(), atomic_mutex);
fx.bcx.ins().call(pthread_mutex_unlock, &[atomic_mutex]);
}
instance: Instance<'tcx>,
linkage: Linkage,
) {
- let tcx = cxcodegen_cx.tcx;
+ let tcx = cx.codegen_cx.tcx;
let mir = tcx.instance_mir(instance.def);
// Declare function
- let (name, sig) = get_function_name_and_sig(tcx, cxcodegen_cx.module.isa().triple(), instance, false);
- let func_id = cxcodegen_cx.module.declare_function(&name, linkage, &sig).unwrap();
+ let (name, sig) = get_function_name_and_sig(tcx, cx.codegen_cx.module.isa().triple(), instance, false);
+ let func_id = cx.codegen_cx.module.declare_function(&name, linkage, &sig).unwrap();
// Make FunctionBuilder
let context = &mut cx.cached_context;
let block_map: IndexVec<BasicBlock, Block> = (0..mir.basic_blocks().len()).map(|_| bcx.create_block()).collect();
// Make FunctionCx
- let pointer_type = cxcodegen_cx.module.target_config().pointer_type();
+ let pointer_type = cx.codegen_cx.module.target_config().pointer_type();
let clif_comments = crate::pretty_clif::CommentWriter::new(tcx, instance);
let mut fx = FunctionCx {
tcx,
- module: &mut cxcodegen_cx.module,
+ module: &mut cx.codegen_cx.module,
global_asm: &mut cx.global_asm,
pointer_type,
cold_blocks: EntitySet::new(),
clif_comments,
- constants_cx: &mut cxcodegen_cx.constants_cx,
+ constants_cx: &mut cx.codegen_cx.constants_cx,
vtables: &mut cx.vtables,
source_info_set: indexmap::IndexSet::new(),
next_ssa_var: 0,
let cold_blocks = fx.cold_blocks;
crate::pretty_clif::write_clif_file(
- cxcodegen_cx.tcx,
+ cx.codegen_cx.tcx,
"unopt",
None,
instance,
// instruction, which doesn't have an encoding.
context.compute_cfg();
context.compute_domtree();
- context.eliminate_unreachable_code(cxcodegen_cx.module.isa()).unwrap();
+ context.eliminate_unreachable_code(cx.codegen_cx.module.isa()).unwrap();
// Define function
- let module = &mut cxcodegen_cx.module;
+ let module = &mut cx.codegen_cx.module;
tcx.sess.time(
"define function",
|| module.define_function(
// Write optimized function to file for debugging
crate::pretty_clif::write_clif_file(
- cxcodegen_cx.tcx,
+ cx.codegen_cx.tcx,
"opt",
- Some(cxcodegen_cx.module.isa()),
+ Some(cx.codegen_cx.module.isa()),
instance,
&context,
&clif_comments,
);
// Define debuginfo for function
- let isa = cxcodegen_cx.module.isa();
+ let isa = cx.codegen_cx.module.isa();
let debug_context = &mut cx.debug_context;
let unwind_context = &mut cx.unwind_context;
tcx.sess.time("generate debug info", || {
target,
cleanup: _,
} => {
- if !fxcodegen_cx.tcx.sess.overflow_checks() {
+ if !fx.codegen_cx.tcx.sess.overflow_checks() {
if let mir::AssertKind::OverflowNeg(_) = *msg {
let target = fx.get_block(*target);
fx.bcx.ins().jump(target, &[]);
}
};
- let def_id = fxcodegen_cx.tcx.lang_items().require(lang_item).unwrap_or_else(|s| {
- fxcodegen_cx.tcx.sess.span_fatal(bb_data.terminator().source_info.span, &s)
+ let def_id = fx.codegen_cx.tcx.lang_items().require(lang_item).unwrap_or_else(|s| {
+ fx.codegen_cx.tcx.sess.span_fatal(bb_data.terminator().source_info.span, &s)
});
- let instance = Instance::mono(fxcodegen_cx.tcx, def_id).polymorphize(fxcodegen_cx.tcx);
- let symbol_name = fxcodegen_cx.tcx.symbol_name(instance).name;
+ let instance = Instance::mono(fx.codegen_cx.tcx, def_id).polymorphize(fx.codegen_cx.tcx);
+ let symbol_name = fx.codegen_cx.tcx.symbol_name(instance).name;
fx.lib_call(&*symbol_name, vec![fx.pointer_type, fx.pointer_type, fx.pointer_type], vec![], &args);
cleanup: _,
from_hir_call: _,
} => {
- fxcodegen_cx.tcx.sess.time("codegen call", || crate::abi::codegen_terminator_call(
+ fx.codegen_cx.tcx.sess.time("codegen call", || crate::abi::codegen_terminator_call(
fx,
*fn_span,
block,
let lhs = trans_operand(fx, lhs);
let rhs = trans_operand(fx, rhs);
- let res = if !fxcodegen_cx.tcx.sess.overflow_checks() {
+ let res = if !fx.codegen_cx.tcx.sess.overflow_checks() {
let val =
crate::num::trans_int_binop(fx, *bin_op, lhs, rhs).load_scalar(fx);
let is_overflow = fx.bcx.ins().iconst(types::I8, 0);
lval.write_cvalue(fx, res);
}
Rvalue::Cast(CastKind::Pointer(PointerCast::ReifyFnPointer), operand, to_ty) => {
- let from_ty = fx.monomorphize(&operand.ty(&fx.mir.local_decls, fxcodegen_cx.tcx));
+ let from_ty = fx.monomorphize(&operand.ty(&fx.mir.local_decls, fx.codegen_cx.tcx));
let to_layout = fx.layout_of(fx.monomorphize(to_ty));
match from_ty.kind {
ty::FnDef(def_id, substs) => {
let func_ref = fx.get_function_ref(
- Instance::resolve_for_fn_ptr(fxcodegen_cx.tcx, ParamEnv::reveal_all(), def_id, substs)
+ Instance::resolve_for_fn_ptr(fx.codegen_cx.tcx, ParamEnv::reveal_all(), def_id, substs)
.unwrap()
- .polymorphize(fxcodegen_cx.tcx),
+ .polymorphize(fx.codegen_cx.tcx),
);
let func_addr = fx.bcx.ins().func_addr(fx.pointer_type, func_ref);
lval.write_cvalue(fx, CValue::by_val(func_addr, to_layout));
|ty::TypeAndMut {
ty: pointee_ty,
mutbl: _,
- }| has_ptr_meta(fxcodegen_cx.tcx, pointee_ty),
+ }| has_ptr_meta(fx.codegen_cx.tcx, pointee_ty),
)
.unwrap_or(false)
}
match &operand.layout().variants {
Variants::Single { index } => {
- let discr = operand.layout().ty.discriminant_for_variant(fxcodegen_cx.tcx, *index).unwrap();
+ let discr = operand.layout().ty.discriminant_for_variant(fx.codegen_cx.tcx, *index).unwrap();
let discr = if discr.ty.is_signed() {
rustc_middle::mir::interpret::sign_extend(discr.val, fx.layout_of(discr.ty).size)
} else {
match operand.layout().ty.kind {
ty::Closure(def_id, substs) => {
let instance = Instance::resolve_closure(
- fxcodegen_cx.tcx,
+ fx.codegen_cx.tcx,
def_id,
substs,
ty::ClosureKind::FnOnce,
- ).polymorphize(fxcodegen_cx.tcx);
+ ).polymorphize(fx.codegen_cx.tcx);
let func_ref = fx.get_function_ref(instance);
let func_addr = fx.bcx.ins().func_addr(fx.pointer_type, func_ref);
lval.write_cvalue(fx, CValue::by_val(func_addr, lval.layout()));
let operand = trans_operand(fx, operand);
let times = fx
.monomorphize(times)
- .eval(fxcodegen_cx.tcx, ParamEnv::reveal_all())
+ .eval(fx.codegen_cx.tcx, ParamEnv::reveal_all())
.val
- .try_to_bits(fxcodegen_cx.tcx.data_layout.pointer_size)
+ .try_to_bits(fx.codegen_cx.tcx.data_layout.pointer_size)
.unwrap();
for i in 0..times {
let index = fx.bcx.ins().iconst(fx.pointer_type, i as i64);
}
Rvalue::Len(place) => {
let place = trans_place(fx, *place);
- let usize_layout = fx.layout_of(fxcodegen_cx.tcx.types.usize);
+ let usize_layout = fx.layout_of(fx.codegen_cx.tcx.types.usize);
let len = codegen_array_len(fx, place);
lval.write_cvalue(fx, CValue::by_val(len, usize_layout));
}
Rvalue::NullaryOp(NullOp::Box, content_ty) => {
use rustc_hir::lang_items::ExchangeMallocFnLangItem;
- let usize_type = fx.clif_type(fxcodegen_cx.tcx.types.usize).unwrap();
+ let usize_type = fx.clif_type(fx.codegen_cx.tcx.types.usize).unwrap();
let content_ty = fx.monomorphize(content_ty);
let layout = fx.layout_of(content_ty);
let llsize = fx.bcx.ins().iconst(usize_type, layout.size.bytes() as i64);
.bcx
.ins()
.iconst(usize_type, layout.align.abi.bytes() as i64);
- let box_layout = fx.layout_of(fxcodegen_cx.tcx.mk_box(content_ty));
+ let box_layout = fx.layout_of(fx.codegen_cx.tcx.mk_box(content_ty));
// Allocate space:
- let def_id = match fxcodegen_cx.tcx.lang_items().require(ExchangeMallocFnLangItem) {
+ let def_id = match fx.codegen_cx.tcx.lang_items().require(ExchangeMallocFnLangItem) {
Ok(id) => id,
Err(s) => {
- fxcodegen_cx.tcx
+ fx.codegen_cx.tcx
.sess
.fatal(&format!("allocation of `{}` {}", box_layout.ty, s));
}
};
- let instance = ty::Instance::mono(fxcodegen_cx.tcx, def_id).polymorphize(fxcodegen_cx.tcx);
+ let instance = ty::Instance::mono(fx.codegen_cx.tcx, def_id).polymorphize(fx.codegen_cx.tcx);
let func_ref = fx.get_function_ref(instance);
let call = fx.bcx.ins().call(func_ref, &[llsize, llalign]);
let ptr = fx.bcx.inst_results(call)[0];
assert!(lval
.layout()
.ty
- .is_sized(fxcodegen_cx.tcx.at(stmt.source_info.span), ParamEnv::reveal_all()));
+ .is_sized(fx.codegen_cx.tcx.at(stmt.source_info.span), ParamEnv::reveal_all()));
let ty_size = fx.layout_of(fx.monomorphize(ty)).size.bytes();
- let val = CValue::const_val(fx, fx.layout_of(fxcodegen_cx.tcx.types.usize), ty_size.into());
+ let val = CValue::const_val(fx, fx.layout_of(fx.codegen_cx.tcx.types.usize), ty_size.into());
lval.write_cvalue(fx, val);
}
Rvalue::Aggregate(kind, operands) => match **kind {
let (eax, ebx, ecx, edx) = crate::intrinsics::codegen_cpuid_call(fx, leaf, subleaf);
assert_eq!(outputs.len(), 4);
- trans_place(fx, outputs[0]).write_cvalue(fx, CValue::by_val(eax, fx.layout_of(fxcodegen_cx.tcx.types.u32)));
- trans_place(fx, outputs[1]).write_cvalue(fx, CValue::by_val(ebx, fx.layout_of(fxcodegen_cx.tcx.types.u32)));
- trans_place(fx, outputs[2]).write_cvalue(fx, CValue::by_val(ecx, fx.layout_of(fxcodegen_cx.tcx.types.u32)));
- trans_place(fx, outputs[3]).write_cvalue(fx, CValue::by_val(edx, fx.layout_of(fxcodegen_cx.tcx.types.u32)));
+ trans_place(fx, outputs[0]).write_cvalue(fx, CValue::by_val(eax, fx.layout_of(fx.codegen_cx.tcx.types.u32)));
+ trans_place(fx, outputs[1]).write_cvalue(fx, CValue::by_val(ebx, fx.layout_of(fx.codegen_cx.tcx.types.u32)));
+ trans_place(fx, outputs[2]).write_cvalue(fx, CValue::by_val(ecx, fx.layout_of(fx.codegen_cx.tcx.types.u32)));
+ trans_place(fx, outputs[3]).write_cvalue(fx, CValue::by_val(edx, fx.layout_of(fx.codegen_cx.tcx.types.u32)));
}
"xgetbv" => {
assert_eq!(input_names, &[Symbol::intern("{ecx}")]);
crate::trap::trap_unimplemented(fx, "_xgetbv arch intrinsic is not supported");
}
// ___chkstk, ___chkstk_ms and __alloca are only used on Windows
- _ if fxcodegen_cx.tcx.symbol_name(fx.instance).name.starts_with("___chkstk") => {
+ _ if fx.codegen_cx.tcx.symbol_name(fx.instance).name.starts_with("___chkstk") => {
crate::trap::trap_unimplemented(fx, "Stack probes are not supported");
}
- _ if fxcodegen_cx.tcx.symbol_name(fx.instance).name == "__alloca" => {
+ _ if fx.codegen_cx.tcx.symbol_name(fx.instance).name == "__alloca" => {
crate::trap::trap_unimplemented(fx, "Alloca is not supported");
}
// Used in sys::windows::abort_internal
"int $$0x29" => {
crate::trap::trap_unimplemented(fx, "Windows abort");
}
- _ => fxcodegen_cx.tcx.sess.span_fatal(stmt.source_info.span, "Inline assembly is not supported"),
+ _ => fx.codegen_cx.tcx.sess.span_fatal(stmt.source_info.span, "Inline assembly is not supported"),
}
}
}
match place.layout().ty.kind {
ty::Array(_elem_ty, len) => {
let len = fx.monomorphize(&len)
- .eval(fxcodegen_cx.tcx, ParamEnv::reveal_all())
- .eval_usize(fxcodegen_cx.tcx, ParamEnv::reveal_all()) as i64;
+ .eval(fx.codegen_cx.tcx, ParamEnv::reveal_all())
+ .eval_usize(fx.codegen_cx.tcx, ParamEnv::reveal_all()) as i64;
fx.bcx.ins().iconst(fx.pointer_type, len)
}
ty::Slice(_elem_ty) => place
let ptr = cplace.to_ptr();
cplace = CPlace::for_ptr(
ptr.offset_i64(fx, elem_layout.size.bytes() as i64 * i64::from(from)),
- fx.layout_of(fxcodegen_cx.tcx.mk_array(elem_ty, u64::from(to) - u64::from(from))),
+ fx.layout_of(fx.codegen_cx.tcx.mk_array(elem_ty, u64::from(to) - u64::from(from))),
);
}
ty::Slice(elem_ty) => {
);
let from_rust_ty = if from_signed {
- fxcodegen_cx.tcx.types.i128
+ fx.codegen_cx.tcx.types.i128
} else {
- fxcodegen_cx.tcx.types.u128
+ fx.codegen_cx.tcx.types.u128
};
let to_rust_ty = match to_ty {
- types::F32 => fxcodegen_cx.tcx.types.f32,
- types::F64 => fxcodegen_cx.tcx.types.f64,
+ types::F32 => fx.codegen_cx.tcx.types.f32,
+ types::F64 => fx.codegen_cx.tcx.types.f64,
_ => unreachable!(),
};
);
let from_rust_ty = match from_ty {
- types::F32 => fxcodegen_cx.tcx.types.f32,
- types::F64 => fxcodegen_cx.tcx.types.f64,
+ types::F32 => fx.codegen_cx.tcx.types.f32,
+ types::F64 => fx.codegen_cx.tcx.types.f64,
_ => unreachable!(),
};
let to_rust_ty = if to_signed {
- fxcodegen_cx.tcx.types.i128
+ fx.codegen_cx.tcx.types.i128
} else {
- fxcodegen_cx.tcx.types.u128
+ fx.codegen_cx.tcx.types.u128
};
return fx
lhs: CValue<'tcx>,
rhs: CValue<'tcx>,
) -> Option<CValue<'tcx>> {
- if lhs.layout().ty != fxcodegen_cx.tcx.types.u128 && lhs.layout().ty != fxcodegen_cx.tcx.types.i128 {
+ if lhs.layout().ty != fx.codegen_cx.tcx.types.u128 && lhs.layout().ty != fx.codegen_cx.tcx.types.i128 {
return None;
}
}
BinOp::Add | BinOp::Sub if !checked => return None,
BinOp::Add => {
- let out_ty = fxcodegen_cx.tcx.mk_tup([lhs.layout().ty, fxcodegen_cx.tcx.types.bool].iter());
+ let out_ty = fx.codegen_cx.tcx.mk_tup([lhs.layout().ty, fx.codegen_cx.tcx.types.bool].iter());
return Some(if is_signed {
fx.easy_call("__rust_i128_addo", &[lhs, rhs], out_ty)
} else {
});
}
BinOp::Sub => {
- let out_ty = fxcodegen_cx.tcx.mk_tup([lhs.layout().ty, fxcodegen_cx.tcx.types.bool].iter());
+ let out_ty = fx.codegen_cx.tcx.mk_tup([lhs.layout().ty, fx.codegen_cx.tcx.types.bool].iter());
return Some(if is_signed {
fx.easy_call("__rust_i128_subo", &[lhs, rhs], out_ty)
} else {
BinOp::Offset => unreachable!("offset should only be used on pointers, not 128bit ints"),
BinOp::Mul => {
let res = if checked {
- let out_ty = fxcodegen_cx.tcx.mk_tup([lhs.layout().ty, fxcodegen_cx.tcx.types.bool].iter());
+ let out_ty = fx.codegen_cx.tcx.mk_tup([lhs.layout().ty, fx.codegen_cx.tcx.types.bool].iter());
if is_signed {
fx.easy_call("__rust_i128_mulo", &[lhs, rhs], out_ty)
} else {
}
} else {
let val_ty = if is_signed {
- fxcodegen_cx.tcx.types.i128
+ fx.codegen_cx.tcx.types.i128
} else {
- fxcodegen_cx.tcx.types.u128
+ fx.codegen_cx.tcx.types.u128
};
fx.easy_call("__multi3", &[lhs, rhs], val_ty)
};
BinOp::Div => {
assert!(!checked);
if is_signed {
- Some(fx.easy_call("__divti3", &[lhs, rhs], fxcodegen_cx.tcx.types.i128))
+ Some(fx.easy_call("__divti3", &[lhs, rhs], fx.codegen_cx.tcx.types.i128))
} else {
- Some(fx.easy_call("__udivti3", &[lhs, rhs], fxcodegen_cx.tcx.types.u128))
+ Some(fx.easy_call("__udivti3", &[lhs, rhs], fx.codegen_cx.tcx.types.u128))
}
}
BinOp::Rem => {
assert!(!checked);
if is_signed {
- Some(fx.easy_call("__modti3", &[lhs, rhs], fxcodegen_cx.tcx.types.i128))
+ Some(fx.easy_call("__modti3", &[lhs, rhs], fx.codegen_cx.tcx.types.i128))
} else {
- Some(fx.easy_call("__umodti3", &[lhs, rhs], fxcodegen_cx.tcx.types.u128))
+ Some(fx.easy_call("__umodti3", &[lhs, rhs], fx.codegen_cx.tcx.types.u128))
}
}
BinOp::Lt | BinOp::Le | BinOp::Eq | BinOp::Ge | BinOp::Gt | BinOp::Ne => {
let val = match (bin_op, is_signed) {
(BinOp::Shr, false) => {
let val = fx.bcx.ins().iconcat(lhs_msb, all_zeros);
- Some(CValue::by_val(val, fx.layout_of(fxcodegen_cx.tcx.types.u128)))
+ Some(CValue::by_val(val, fx.layout_of(fx.codegen_cx.tcx.types.u128)))
}
(BinOp::Shr, true) => {
let sign = fx.bcx.ins().icmp_imm(IntCC::SignedLessThan, lhs_msb, 0);
let all_sign_bits = fx.bcx.ins().select(sign, all_zeros, all_ones);
let val = fx.bcx.ins().iconcat(lhs_msb, all_sign_bits);
- Some(CValue::by_val(val, fx.layout_of(fxcodegen_cx.tcx.types.i128)))
+ Some(CValue::by_val(val, fx.layout_of(fx.codegen_cx.tcx.types.i128)))
}
(BinOp::Shl, _) => {
let val_ty = if is_signed {
- fxcodegen_cx.tcx.types.i128
+ fx.codegen_cx.tcx.types.i128
} else {
- fxcodegen_cx.tcx.types.u128
+ fx.codegen_cx.tcx.types.u128
};
let val = fx.bcx.ins().iconcat(all_zeros, lhs_lsb);
Some(CValue::by_val(val, fx.layout_of(val_ty)))
};
if let Some(val) = val {
if let Some(is_overflow) = is_overflow {
- let out_ty = fxcodegen_cx.tcx.mk_tup([lhs.layout().ty, fxcodegen_cx.tcx.types.bool].iter());
+ let out_ty = fx.codegen_cx.tcx.mk_tup([lhs.layout().ty, fx.codegen_cx.tcx.types.bool].iter());
let val = val.load_scalar(fx);
return Some(CValue::by_val_pair(val, is_overflow, fx.layout_of(out_ty)));
} else {
}
let truncated_rhs = clif_intcast(fx, rhs_val, types::I32, false);
- let truncated_rhs = CValue::by_val(truncated_rhs, fx.layout_of(fxcodegen_cx.tcx.types.u32));
+ let truncated_rhs = CValue::by_val(truncated_rhs, fx.layout_of(fx.codegen_cx.tcx.types.u32));
let val = match (bin_op, is_signed) {
(BinOp::Shl, false) => {
- fx.easy_call("__ashlti3", &[lhs, truncated_rhs], fxcodegen_cx.tcx.types.u128)
+ fx.easy_call("__ashlti3", &[lhs, truncated_rhs], fx.codegen_cx.tcx.types.u128)
}
(BinOp::Shl, true) => {
- fx.easy_call("__ashlti3", &[lhs, truncated_rhs], fxcodegen_cx.tcx.types.i128)
+ fx.easy_call("__ashlti3", &[lhs, truncated_rhs], fx.codegen_cx.tcx.types.i128)
}
(BinOp::Shr, false) => {
- fx.easy_call("__lshrti3", &[lhs, truncated_rhs], fxcodegen_cx.tcx.types.u128)
+ fx.easy_call("__lshrti3", &[lhs, truncated_rhs], fx.codegen_cx.tcx.types.u128)
}
(BinOp::Shr, true) => {
- fx.easy_call("__ashrti3", &[lhs, truncated_rhs], fxcodegen_cx.tcx.types.i128)
+ fx.easy_call("__ashrti3", &[lhs, truncated_rhs], fx.codegen_cx.tcx.types.i128)
}
(_, _) => unreachable!(),
};
if let Some(is_overflow) = is_overflow {
- let out_ty = fxcodegen_cx.tcx.mk_tup([lhs.layout().ty, fxcodegen_cx.tcx.types.bool].iter());
+ let out_ty = fx.codegen_cx.tcx.mk_tup([lhs.layout().ty, fx.codegen_cx.tcx.types.bool].iter());
let val = val.load_scalar(fx);
Some(CValue::by_val_pair(val, is_overflow, fx.layout_of(out_ty)))
} else {
}
pub(crate) struct FunctionCx<'clif, 'tcx, B: Backend + 'static> {
    // Shared per-crate codegen state (tcx, module, constants, ...).
    // FIX: the patch line introduced `.codegen_cx` — a leading dot is not a
    // valid field name; the field must be `codegen_cx`.
    pub(crate) codegen_cx: &'clif CodegenCx<'tcx, B>,
    pub(crate) global_asm: &'clif mut String,
    pub(crate) pointer_type: Type, // Cached from module
fn layout_of(&self, ty: Ty<'tcx>) -> TyAndLayout<'tcx> {
assert!(!ty.still_further_specializable());
- selfcodegen_cx.tcx
+ self.codegen_cx.tcx
.layout_of(ParamEnv::reveal_all().and(&ty))
.unwrap_or_else(|e| {
if let layout::LayoutError::SizeOverflow(_) = e {
- selfcodegen_cx.tcx.sess.fatal(&e.to_string())
+ self.codegen_cx.tcx.sess.fatal(&e.to_string())
} else {
bug!("failed to get layout for `{}`: {}", ty, e)
}
impl<'tcx, B: Backend + 'static> layout::HasTyCtxt<'tcx> for FunctionCx<'_, 'tcx, B> {
    /// Expose the `TyCtxt` held by the shared `CodegenCx`.
    fn tcx<'b>(&'b self) -> TyCtxt<'tcx> {
        self.codegen_cx.tcx
    }
}
impl<'tcx, B: Backend + 'static> rustc_target::abi::HasDataLayout for FunctionCx<'_, 'tcx, B> {
    /// Target data layout, borrowed from the `TyCtxt` behind the codegen context.
    fn data_layout(&self) -> &rustc_target::abi::TargetDataLayout {
        &self.codegen_cx.tcx.data_layout
    }
}
impl<'tcx, B: Backend + 'static> HasTargetSpec for FunctionCx<'_, 'tcx, B> {
    /// Target spec from the session owned by the shared `TyCtxt`.
    fn target_spec(&self) -> &Target {
        &self.codegen_cx.tcx.sess.target.target
    }
}
T: TypeFoldable<'tcx> + Copy,
{
if let Some(substs) = self.instance.substs_for_mir_body() {
- selfcodegen_cx.tcx.subst_and_normalize_erasing_regions(
+ self.codegen_cx.tcx.subst_and_normalize_erasing_regions(
substs,
ty::ParamEnv::reveal_all(),
value,
)
} else {
- selfcodegen_cx.tcx.normalize_erasing_regions(ty::ParamEnv::reveal_all(), *value)
+ self.codegen_cx.tcx.normalize_erasing_regions(ty::ParamEnv::reveal_all(), *value)
}
}
/// Maps a Rust type to its single-value Cranelift type, if it has one.
/// Delegates to the free function `clif_type_from_ty`.
pub(crate) fn clif_type(&self, ty: Ty<'tcx>) -> Option<Type> {
    clif_type_from_ty(self.codegen_cx.tcx, ty)
}
/// Maps a Rust type to a pair of Cranelift types, if it is represented as a
/// scalar pair. Delegates to the free function `clif_pair_type_from_ty`.
pub(crate) fn clif_pair_type(&self, ty: Ty<'tcx>) -> Option<(Type, Type)> {
    clif_pair_type_from_ty(self.codegen_cx.tcx, ty)
}
pub(crate) fn get_block(&self, bb: BasicBlock) -> Block {
}
let topmost = span.ctxt().outer_expn().expansion_cause().unwrap_or(span);
- let caller = selfcodegen_cx.tcx.sess.source_map().lookup_char_pos(topmost.lo());
- let const_loc = selfcodegen_cx.tcx.const_caller_location((
+ let caller = self.codegen_cx.tcx.sess.source_map().lookup_char_pos(topmost.lo());
+ let const_loc = self.codegen_cx.tcx.const_caller_location((
rustc_span::symbol::Symbol::intern(&caller.file.name.to_string()),
caller.line as u32,
caller.col_display as u32 + 1,
crate::constant::trans_const_value(
self,
const_loc,
- selfcodegen_cx.tcx.caller_location_ty(),
+ self.codegen_cx.tcx.caller_location_ty(),
)
}
pub(crate) fn triple(&self) -> &target_lexicon::Triple {
- selfcodegen_cx.module.isa().triple()
+ self.codegen_cx.module.isa().triple()
}
pub(crate) fn anonymous_str(&mut self, prefix: &str, msg: &str) -> Value {
let mut data_ctx = DataContext::new();
data_ctx.define(msg.as_bytes().to_vec().into_boxed_slice());
let msg_id = self
- codegen_cx.module
+ .codegen_cx.module
.declare_data(
&format!("__{}_{:08x}", prefix, msg_hash),
Linkage::Local,
.unwrap();
// Ignore DuplicateDefinition error, as the data will be the same
- let _ = selfcodegen_cx.module.define_data(msg_id, &data_ctx);
+ let _ = self.codegen_cx.module.define_data(msg_id, &data_ctx);
- let local_msg_id = selfcodegen_cx.module.declare_data_in_func(msg_id, self.bcx.func);
+ let local_msg_id = self.codegen_cx.module.declare_data_in_func(msg_id, self.bcx.func);
#[cfg(debug_assertions)]
{
self.add_comment(local_msg_id, msg);
match const_.val {
ConstKind::Value(_) => {}
ConstKind::Unevaluated(def, ref substs, promoted) => {
- if let Err(err) = fxcodegen_cx.tcx.const_eval_resolve(ParamEnv::reveal_all(), def, substs, promoted, None) {
+ if let Err(err) = fx.codegen_cx.tcx.const_eval_resolve(ParamEnv::reveal_all(), def, substs, promoted, None) {
match err {
ErrorHandled::Reported(ErrorReported) | ErrorHandled::Linted => {
- fxcodegen_cx.tcx.sess.span_err(constant.span, "erroneous constant encountered");
+ fx.codegen_cx.tcx.sess.span_err(constant.span, "erroneous constant encountered");
}
ErrorHandled::TooGeneric => {
span_bug!(constant.span, "codgen encountered polymorphic constant: {:?}", err);
def_id: DefId,
layout: TyAndLayout<'tcx>,
) -> CValue<'tcx> {
- let data_id = data_id_for_static(fxcodegen_cx.tcx, fxcodegen_cx.module, def_id, false);
- let local_data_id = fxcodegen_cx.module.declare_data_in_func(data_id, &mut fx.bcx.func);
+ let data_id = data_id_for_static(fx.codegen_cx.tcx, fx.codegen_cx.module, def_id, false);
+ let local_data_id = fx.codegen_cx.module.declare_data_in_func(data_id, &mut fx.bcx.func);
#[cfg(debug_assertions)]
fx.add_comment(local_data_id, format!("tls {:?}", def_id));
let tls_ptr = fx.bcx.ins().tls_value(fx.pointer_type, local_data_id);
def_id: DefId,
layout: TyAndLayout<'tcx>,
) -> CPlace<'tcx> {
- let data_id = data_id_for_static(fxcodegen_cx.tcx, fxcodegen_cx.module, def_id, false);
- let local_data_id = fxcodegen_cx.module.declare_data_in_func(data_id, &mut fx.bcx.func);
+ let data_id = data_id_for_static(fx.codegen_cx.tcx, fx.codegen_cx.module, def_id, false);
+ let local_data_id = fx.codegen_cx.module.declare_data_in_func(data_id, &mut fx.bcx.func);
#[cfg(debug_assertions)]
fx.add_comment(local_data_id, format!("{:?}", def_id));
let global_ptr = fx.bcx.ins().global_value(fx.pointer_type, local_data_id);
let const_ = fx.monomorphize(&constant.literal);
let const_val = match const_.val {
ConstKind::Value(const_val) => const_val,
- ConstKind::Unevaluated(def, ref substs, promoted) if fxcodegen_cx.tcx.is_static(def.did) => {
+ ConstKind::Unevaluated(def, ref substs, promoted) if fx.codegen_cx.tcx.is_static(def.did) => {
assert!(substs.is_empty());
assert!(promoted.is_none());
).to_cvalue(fx);
}
ConstKind::Unevaluated(def, ref substs, promoted) => {
- match fxcodegen_cx.tcx.const_eval_resolve(ParamEnv::reveal_all(), def, substs, promoted, None) {
+ match fx.codegen_cx.tcx.const_eval_resolve(ParamEnv::reveal_all(), def, substs, promoted, None) {
Ok(const_val) => const_val,
Err(_) => {
if promoted.is_none() {
- fxcodegen_cx.tcx.sess.span_err(constant.span, "erroneous constant encountered");
+ fx.codegen_cx.tcx.sess.span_err(constant.span, "erroneous constant encountered");
}
return crate::trap::trap_unreachable_ret_value(
fx,
);
let ptr = Pointer::new(AllocId(!0), Size::ZERO); // The alloc id is never used
alloc.write_scalar(fx, ptr, x.into(), size).unwrap();
- let alloc = fxcodegen_cx.tcx.intern_const_alloc(alloc);
+ let alloc = fx.codegen_cx.tcx.intern_const_alloc(alloc);
return CValue::by_ref(pointer_for_allocation(fx, alloc), layout);
}
return CValue::const_val(fx, layout, data);
}
Scalar::Ptr(ptr) => {
- let alloc_kind = fxcodegen_cx.tcx.get_global_alloc(ptr.alloc_id);
+ let alloc_kind = fx.codegen_cx.tcx.get_global_alloc(ptr.alloc_id);
let base_addr = match alloc_kind {
Some(GlobalAlloc::Memory(alloc)) => {
- fxcodegen_cx.constants_cx.todo.push(TodoItem::Alloc(ptr.alloc_id));
- let data_id = data_id_for_alloc_id(fxcodegen_cx.module, ptr.alloc_id, alloc.align, alloc.mutability);
- let local_data_id = fxcodegen_cx.module.declare_data_in_func(data_id, &mut fx.bcx.func);
+ fx.codegen_cx.constants_cx.todo.push(TodoItem::Alloc(ptr.alloc_id));
+ let data_id = data_id_for_alloc_id(fx.codegen_cx.module, ptr.alloc_id, alloc.align, alloc.mutability);
+ let local_data_id = fx.codegen_cx.module.declare_data_in_func(data_id, &mut fx.bcx.func);
#[cfg(debug_assertions)]
fx.add_comment(local_data_id, format!("{:?}", ptr.alloc_id));
fx.bcx.ins().global_value(fx.pointer_type, local_data_id)
}
Some(GlobalAlloc::Function(instance)) => {
- let func_id = crate::abi::import_function(fxcodegen_cx.tcx, fxcodegen_cx.module, instance);
- let local_func_id = fxcodegen_cx.module.declare_func_in_func(func_id, &mut fx.bcx.func);
+ let func_id = crate::abi::import_function(fx.codegen_cx.tcx, fx.codegen_cx.module, instance);
+ let local_func_id = fx.codegen_cx.module.declare_func_in_func(func_id, &mut fx.bcx.func);
fx.bcx.ins().func_addr(fx.pointer_type, local_func_id)
}
Some(GlobalAlloc::Static(def_id)) => {
- assert!(fxcodegen_cx.tcx.is_static(def_id));
- let data_id = data_id_for_static(fxcodegen_cx.tcx, fxcodegen_cx.module, def_id, false);
- let local_data_id = fxcodegen_cx.module.declare_data_in_func(data_id, &mut fx.bcx.func);
+ assert!(fx.codegen_cx.tcx.is_static(def_id));
+ let data_id = data_id_for_static(fx.codegen_cx.tcx, fx.codegen_cx.module, def_id, false);
+ let local_data_id = fx.codegen_cx.module.declare_data_in_func(data_id, &mut fx.bcx.func);
#[cfg(debug_assertions)]
fx.add_comment(local_data_id, format!("{:?}", def_id));
fx.bcx.ins().global_value(fx.pointer_type, local_data_id)
fx: &mut FunctionCx<'_, 'tcx, impl Backend>,
alloc: &'tcx Allocation,
) -> crate::pointer::Pointer {
- let alloc_id = fxcodegen_cx.tcx.create_memory_alloc(alloc);
- fxcodegen_cx.constants_cx.todo.push(TodoItem::Alloc(alloc_id));
- let data_id = data_id_for_alloc_id(fxcodegen_cx.module, alloc_id, alloc.align, alloc.mutability);
+ let alloc_id = fx.codegen_cx.tcx.create_memory_alloc(alloc);
+ fx.codegen_cx.constants_cx.todo.push(TodoItem::Alloc(alloc_id));
+ let data_id = data_id_for_alloc_id(fx.codegen_cx.module, alloc_id, alloc.align, alloc.mutability);
- let local_data_id = fxcodegen_cx.module.declare_data_in_func(data_id, &mut fx.bcx.func);
+ let local_data_id = fx.codegen_cx.module.declare_data_in_func(data_id, &mut fx.bcx.func);
#[cfg(debug_assertions)]
fx.add_comment(local_data_id, format!("{:?}", alloc_id));
let global_ptr = fx.bcx.ins().global_value(fx.pointer_type, local_data_id);
match operand {
Operand::Copy(_) | Operand::Move(_) => None,
Operand::Constant(const_) => {
- Some(fx.monomorphize(&const_.literal).eval(fxcodegen_cx.tcx, ParamEnv::reveal_all()))
+ Some(fx.monomorphize(&const_.literal).eval(fx.codegen_cx.tcx, ParamEnv::reveal_all()))
}
}
}
impl<'tcx> DebugContext<'tcx> {
pub(super) fn emit_location(&mut self, entry_id: UnitEntryId, span: Span) {
- let loc = selfcodegen_cx.tcx.sess.source_map().lookup_char_pos(span.lo());
+ let loc = self.codegen_cx.tcx.sess.source_map().lookup_char_pos(span.lo());
let file_id = line_program_add_file(
&mut self.dwarf.unit.line_program,
function_span: Span,
source_info_set: &indexmap::IndexSet<SourceInfo>,
) -> CodeOffset {
- let tcx = selfcodegen_cx.tcx;
+ let tcx = self.codegen_cx.tcx;
let line_program = &mut self.dwarf.unit.line_program;
let func = &context.func;
};
let name = format!("{}", ty);
- let layout = selfcodegen_cx.tcx.layout_of(ParamEnv::reveal_all().and(ty)).unwrap();
+ let layout = self.codegen_cx.tcx.layout_of(ParamEnv::reveal_all().and(ty)).unwrap();
let type_id = match ty.kind {
ty::Bool => primitive(&mut self.dwarf, gimli::DW_ATE_boolean),
for (field_idx, field_def) in variant.fields.iter().enumerate() {
let field_offset = layout.fields.offset(field_idx);
let field_layout = layout.field(&layout::LayoutCx {
- tcx: selfcodegen_cx.tcx,
+ tcx: self.codegen_cx.tcx,
param_env: ParamEnv::reveal_all(),
}, field_idx).unwrap();
local_map: FxHashMap<mir::Local, CPlace<'tcx>>,
) {
let symbol = func_id.as_u32() as usize;
- let mir = selfcodegen_cx.tcx.instance_mir(instance.def);
+ let mir = self.codegen_cx.tcx.instance_mir(instance.def);
// FIXME: add to appropriate scope instead of root
let scope = self.dwarf.unit.root();
let value_labels_ranges = context.build_value_labels_ranges(isa).unwrap();
for (local, _local_decl) in mir.local_decls.iter_enumerated() {
- let ty = selfcodegen_cx.tcx.subst_and_normalize_erasing_regions(
+ let ty = self.codegen_cx.tcx.subst_and_normalize_erasing_regions(
instance.substs,
ty::ParamEnv::reveal_all(),
&mir.local_decls[local].ty,
}
pub(crate) fn emit<P: WriteDebugInfo>(self, product: &mut P) {
- let mut eh_frame = EhFrame::from(super::emit::WriterRelocate::new(super::target_endian(selfcodegen_cx.tcx)));
+ let mut eh_frame = EhFrame::from(super::emit::WriterRelocate::new(super::target_endian(self.codegen_cx.tcx)));
self.frame_table.write_eh_frame(&mut eh_frame).unwrap();
if !eh_frame.0.writer.slice().is_empty() {
self,
jit_module: &mut Module<cranelift_simplejit::SimpleJITBackend>,
) -> Option<UnwindRegistry> {
- let mut eh_frame = EhFrame::from(super::emit::WriterRelocate::new(super::target_endian(selfcodegen_cx.tcx)));
+ let mut eh_frame = EhFrame::from(super::emit::WriterRelocate::new(super::target_endian(self.codegen_cx.tcx)));
self.frame_table.write_eh_frame(&mut eh_frame).unwrap();
if eh_frame.0.writer.slice().is_empty() {
let ptr = place.place_field(fx, mir::Field::new(tag_field));
let to = layout
.ty
- .discriminant_for_variant(fxcodegen_cx.tcx, variant_index)
+ .discriminant_for_variant(fx.codegen_cx.tcx, variant_index)
.unwrap()
.val;
let discr = CValue::const_val(fx, ptr.layout(), to);
Variants::Single { index } => {
let discr_val = layout
.ty
- .discriminant_for_variant(fxcodegen_cx.tcx, *index)
+ .discriminant_for_variant(fx.codegen_cx.tcx, *index)
.map_or(u128::from(index.as_u32()), |discr| discr.val);
return CValue::const_val(fx, dest_layout, discr_val);
}
cx: &mut crate::CodegenCx<'tcx, impl Backend + 'static>,
mono_items: Vec<(MonoItem<'tcx>, (RLinkage, Visibility))>,
) {
- cxcodegen_cx.tcx.sess.time("predefine functions", || {
+ cx.codegen_cx.tcx.sess.time("predefine functions", || {
for &(mono_item, (linkage, visibility)) in &mono_items {
match mono_item {
MonoItem::Fn(instance) => {
let (name, sig) =
- get_function_name_and_sig(cxcodegen_cx.tcx, cxcodegen_cx.module.isa().triple(), instance, false);
+ get_function_name_and_sig(cx.codegen_cx.tcx, cx.codegen_cx.module.isa().triple(), instance, false);
let linkage = crate::linkage::get_clif_linkage(mono_item, linkage, visibility);
- cxcodegen_cx.module.declare_function(&name, linkage, &sig).unwrap();
+ cx.codegen_cx.module.declare_function(&name, linkage, &sig).unwrap();
}
MonoItem::Static(_) | MonoItem::GlobalAsm(_) => {}
}
mono_item: MonoItem<'tcx>,
linkage: Linkage,
) {
- let tcx = cxcodegen_cx.tcx;
+ let tcx = cx.codegen_cx.tcx;
match mono_item {
MonoItem::Fn(inst) => {
let _inst_guard =
}
});
- cxcodegen_cx.tcx.sess.time("codegen fn", || crate::base::trans_fn(cx, inst, linkage));
+ cx.codegen_cx.tcx.sess.time("codegen fn", || crate::base::trans_fn(cx, inst, linkage));
}
MonoItem::Static(def_id) => {
- crate::constant::codegen_static(&mut cxcodegen_cx.constants_cx, def_id);
+ crate::constant::codegen_static(&mut cx.codegen_cx.constants_cx, def_id);
}
MonoItem::GlobalAsm(hir_id) => {
let item = tcx.hir().expect_item(hir_id);
let inline_asm_index = fx.inline_asm_index;
fx.inline_asm_index += 1;
- let asm_name = format!("{}__inline_asm_{}", fxcodegen_cx.tcx.symbol_name(fx.instance).name, inline_asm_index);
+ let asm_name = format!("{}__inline_asm_{}", fx.codegen_cx.tcx.symbol_name(fx.instance).name, inline_asm_index);
let generated_asm = generate_asm_wrapper(&asm_name, InlineAsmArch::X86_64, options, template, clobbered_regs, &inputs, &outputs);
fx.global_asm.push_str(&generated_asm);
#[cfg(debug_assertions)]
fx.add_comment(stack_slot, "inline asm scratch slot");
- let inline_asm_func = fxcodegen_cx.module.declare_function(asm_name, Linkage::Import, &Signature {
+ let inline_asm_func = fx.codegen_cx.module.declare_function(asm_name, Linkage::Import, &Signature {
call_conv: CallConv::SystemV,
params: vec![AbiParam::new(fx.pointer_type)],
returns: vec![],
}).unwrap();
- let inline_asm_func = fxcodegen_cx.module.declare_func_in_func(inline_asm_func, &mut fx.bcx.func);
+ let inline_asm_func = fx.codegen_cx.module.declare_func_in_func(inline_asm_func, &mut fx.bcx.func);
#[cfg(debug_assertions)]
fx.add_comment(inline_asm_func, asm_name);
intrinsic_match! {
fx, intrinsic, substs, args,
_ => {
- fxcodegen_cx.tcx.sess.warn(&format!("unsupported llvm intrinsic {}; replacing with trap", intrinsic));
+ fx.codegen_cx.tcx.sess.warn(&format!("unsupported llvm intrinsic {}; replacing with trap", intrinsic));
crate::trap::trap_unimplemented(fx, intrinsic);
};
// Used by `_mm_movemask_epi8` and `_mm256_movemask_epi8`
llvm.x86.sse2.pmovmskb.128 | llvm.x86.avx2.pmovmskb | llvm.x86.sse2.movmsk.pd, (c a) {
- let (lane_layout, lane_count) = lane_type_and_count(fxcodegen_cx.tcx, a.layout());
+ let (lane_layout, lane_count) = lane_type_and_count(fx.codegen_cx.tcx, a.layout());
let lane_ty = fx.clif_type(lane_layout.ty).unwrap();
assert!(lane_count <= 32);
res = fx.bcx.ins().bor(res, a_lane_sign);
}
- let res = CValue::by_val(res, fx.layout_of(fxcodegen_cx.tcx.types.i32));
+ let res = CValue::by_val(res, fx.layout_of(fx.codegen_cx.tcx.types.i32));
ret.write_cvalue(fx, res);
};
llvm.x86.sse2.cmp.ps | llvm.x86.sse2.cmp.pd, (c x, c y, o kind) {
let ($($arg,)*) = (
$(trans_operand($fx, $arg),)*
);
- let res = $fx.easy_call(stringify!($func), &[$($arg),*], $fxcodegen_cx.tcx.types.$ty);
+ let res = $fx.easy_call(stringify!($func), &[$($arg),*], $fx.codegen_cx.tcx.types.$ty);
$ret.write_cvalue($fx, res);
if let Some((_, dest)) = $destination {
match $ty.kind {
ty::Uint(_) | ty::Int(_) => {}
_ => {
- $fxcodegen_cx.tcx.sess.span_err($span, &format!("`{}` intrinsic: expected basic integer type, found `{:?}`", $intrinsic, $ty));
+ $fx.codegen_cx.tcx.sess.span_err($span, &format!("`{}` intrinsic: expected basic integer type, found `{:?}`", $intrinsic, $ty));
// Prevent verifier error
crate::trap::trap_unreachable($fx, "compilation should not have succeeded");
return;
macro validate_simd_type($fx:ident, $intrinsic:ident, $span:ident, $ty:expr) {
if !$ty.is_simd() {
- $fxcodegen_cx.tcx.sess.span_err($span, &format!("invalid monomorphization of `{}` intrinsic: expected SIMD input type, found non-SIMD `{}`", $intrinsic, $ty));
+ $fx.codegen_cx.tcx.sess.span_err($span, &format!("invalid monomorphization of `{}` intrinsic: expected SIMD input type, found non-SIMD `{}`", $intrinsic, $ty));
// Prevent verifier error
crate::trap::trap_unreachable($fx, "compilation should not have succeeded");
return;
) {
let layout = val.layout();
- let (lane_layout, lane_count) = lane_type_and_count(fxcodegen_cx.tcx, layout);
- let (ret_lane_layout, ret_lane_count) = lane_type_and_count(fxcodegen_cx.tcx, ret.layout());
+ let (lane_layout, lane_count) = lane_type_and_count(fx.codegen_cx.tcx, layout);
+ let (ret_lane_layout, ret_lane_count) = lane_type_and_count(fx.codegen_cx.tcx, ret.layout());
assert_eq!(lane_count, ret_lane_count);
for lane_idx in 0..lane_count {
assert_eq!(x.layout(), y.layout());
let layout = x.layout();
- let (lane_layout, lane_count) = lane_type_and_count(fxcodegen_cx.tcx, layout);
- let (ret_lane_layout, ret_lane_count) = lane_type_and_count(fxcodegen_cx.tcx, ret.layout());
+ let (lane_layout, lane_count) = lane_type_and_count(fx.codegen_cx.tcx, layout);
+ let (ret_lane_layout, ret_lane_count) = lane_type_and_count(fx.codegen_cx.tcx, ret.layout());
assert_eq!(lane_count, ret_lane_count);
for lane in 0..lane_count {
macro simd_cmp {
($fx:expr, $cc:ident($x:ident, $y:ident) -> $ret:ident) => {
- let vector_ty = clif_vector_type($fxcodegen_cx.tcx, $x.layout());
+ let vector_ty = clif_vector_type($fx.codegen_cx.tcx, $x.layout());
if let Some(vector_ty) = vector_ty {
let x = $x.load_scalar($fx);
let def_id = instance.def_id();
let substs = instance.substs;
- let intrinsic = fxcodegen_cx.tcx.item_name(def_id).as_str();
+ let intrinsic = fx.codegen_cx.tcx.item_name(def_id).as_str();
let intrinsic = &intrinsic[..];
let ret = match destination {
return;
}
- let usize_layout = fx.layout_of(fxcodegen_cx.tcx.types.usize);
+ let usize_layout = fx.layout_of(fx.codegen_cx.tcx.types.usize);
call_intrinsic_match! {
fx, intrinsic, substs, ret, destination, args,
intrinsic_match! {
fx, intrinsic, substs, args,
_ => {
- fxcodegen_cx.tcx.sess.span_fatal(span, &format!("unsupported intrinsic {}", intrinsic));
+ fx.codegen_cx.tcx.sess.span_fatal(span, &format!("unsupported intrinsic {}", intrinsic));
};
assume, (c _a) {};
if intrinsic.contains("nonoverlapping") {
// FIXME emit_small_memcpy
- fx.bcx.call_memcpy(fxcodegen_cx.module.target_config(), dst, src, byte_amount);
+ fx.bcx.call_memcpy(fx.codegen_cx.module.target_config(), dst, src, byte_amount);
} else {
// FIXME emit_small_memmove
- fx.bcx.call_memmove(fxcodegen_cx.module.target_config(), dst, src, byte_amount);
+ fx.bcx.call_memmove(fx.codegen_cx.module.target_config(), dst, src, byte_amount);
}
};
// NOTE: the volatile variants have src and dst swapped
// FIXME make the copy actually volatile when using emit_small_mem{cpy,move}
if intrinsic.contains("nonoverlapping") {
// FIXME emit_small_memcpy
- fx.bcx.call_memcpy(fxcodegen_cx.module.target_config(), dst, src, byte_amount);
+ fx.bcx.call_memcpy(fx.codegen_cx.module.target_config(), dst, src, byte_amount);
} else {
// FIXME emit_small_memmove
- fx.bcx.call_memmove(fxcodegen_cx.module.target_config(), dst, src, byte_amount);
+ fx.bcx.call_memmove(fx.codegen_cx.module.target_config(), dst, src, byte_amount);
}
};
discriminant_value, (c ptr) {
let dst_ptr = dst.load_scalar(fx);
// FIXME make the memset actually volatile when switching to emit_small_memset
// FIXME use emit_small_memset
- fx.bcx.call_memset(fxcodegen_cx.module.target_config(), dst_ptr, val, count);
+ fx.bcx.call_memset(fx.codegen_cx.module.target_config(), dst_ptr, val, count);
};
ctlz | ctlz_nonzero, <T> (v arg) {
// FIXME trap on `ctlz_nonzero` with zero arg.
- let res = if T == fxcodegen_cx.tcx.types.u128 || T == fxcodegen_cx.tcx.types.i128 {
+ let res = if T == fx.codegen_cx.tcx.types.u128 || T == fx.codegen_cx.tcx.types.i128 {
// FIXME verify this algorithm is correct
let (lsb, msb) = fx.bcx.ins().isplit(arg);
let lsb_lz = fx.bcx.ins().clz(lsb);
};
cttz | cttz_nonzero, <T> (v arg) {
// FIXME trap on `cttz_nonzero` with zero arg.
- let res = if T == fxcodegen_cx.tcx.types.u128 || T == fxcodegen_cx.tcx.types.i128 {
+ let res = if T == fx.codegen_cx.tcx.types.u128 || T == fx.codegen_cx.tcx.types.i128 {
// FIXME verify this algorithm is correct
let (lsb, msb) = fx.bcx.ins().isplit(arg);
let lsb_tz = fx.bcx.ins().ctz(lsb);
size_of | pref_align_of | min_align_of | needs_drop | type_id | type_name | variant_count, () {
let const_val =
- fxcodegen_cx.tcx.const_eval_instance(ParamEnv::reveal_all(), instance, None).unwrap();
+ fx.codegen_cx.tcx.const_eval_instance(ParamEnv::reveal_all(), instance, None).unwrap();
let val = crate::constant::trans_const_value(
fx,
const_val,
};
ptr_offset_from, <T> (v ptr, v base) {
- let isize_layout = fx.layout_of(fxcodegen_cx.tcx.types.isize);
+ let isize_layout = fx.layout_of(fx.codegen_cx.tcx.types.isize);
let pointee_size: u64 = fx.layout_of(T).size.bytes();
let diff = fx.bcx.ins().isub(ptr, base);
minnumf32, (v a, v b) {
let val = fx.bcx.ins().fmin(a, b);
- let val = CValue::by_val(val, fx.layout_of(fxcodegen_cx.tcx.types.f32));
+ let val = CValue::by_val(val, fx.layout_of(fx.codegen_cx.tcx.types.f32));
ret.write_cvalue(fx, val);
};
minnumf64, (v a, v b) {
let val = fx.bcx.ins().fmin(a, b);
- let val = CValue::by_val(val, fx.layout_of(fxcodegen_cx.tcx.types.f64));
+ let val = CValue::by_val(val, fx.layout_of(fx.codegen_cx.tcx.types.f64));
ret.write_cvalue(fx, val);
};
maxnumf32, (v a, v b) {
let val = fx.bcx.ins().fmax(a, b);
- let val = CValue::by_val(val, fx.layout_of(fxcodegen_cx.tcx.types.f32));
+ let val = CValue::by_val(val, fx.layout_of(fx.codegen_cx.tcx.types.f32));
ret.write_cvalue(fx, val);
};
maxnumf64, (v a, v b) {
let val = fx.bcx.ins().fmax(a, b);
- let val = CValue::by_val(val, fx.layout_of(fxcodegen_cx.tcx.types.f64));
+ let val = CValue::by_val(val, fx.layout_of(fx.codegen_cx.tcx.types.f64));
ret.write_cvalue(fx, val);
};
let def_id = instance.def_id();
let substs = instance.substs;
- let intrinsic = fxcodegen_cx.tcx.item_name(def_id).as_str();
+ let intrinsic = fx.codegen_cx.tcx.item_name(def_id).as_str();
let intrinsic = &intrinsic[..];
intrinsic_match! {
fx, intrinsic, substs, args,
_ => {
- fxcodegen_cx.tcx.sess.span_fatal(span, &format!("Unknown SIMD intrinsic {}", intrinsic));
+ fx.codegen_cx.tcx.sess.span_fatal(span, &format!("Unknown SIMD intrinsic {}", intrinsic));
};
simd_cast, (c a) {
assert_eq!(x.layout(), y.layout());
let layout = x.layout();
- let (lane_type, lane_count) = lane_type_and_count(fxcodegen_cx.tcx, layout);
- let (ret_lane_type, ret_lane_count) = lane_type_and_count(fxcodegen_cx.tcx, ret.layout());
+ let (lane_type, lane_count) = lane_type_and_count(fx.codegen_cx.tcx, layout);
+ let (ret_lane_type, ret_lane_count) = lane_type_and_count(fx.codegen_cx.tcx, ret.layout());
assert_eq!(lane_type, ret_lane_type);
assert_eq!(n, ret_lane_count);
(0..ret_lane_count).map(|i| {
let i = usize::try_from(i).unwrap();
let idx = rustc_middle::mir::interpret::read_target_uint(
- fxcodegen_cx.tcx.data_layout.endian,
+ fx.codegen_cx.tcx.data_layout.endian,
&idx_bytes[4*i.. 4*i + 4],
).expect("read_target_uint");
u16::try_from(idx).expect("try_from u32")
let idx_const = if let Some(idx_const) = crate::constant::mir_operand_get_const_val(fx, idx) {
idx_const
} else {
- fxcodegen_cx.tcx.sess.span_warn(
+ fx.codegen_cx.tcx.sess.span_warn(
fx.mir.span,
"`#[rustc_arg_required_const(..)]` is not yet supported. Calling this function will panic.",
);
};
let idx = idx_const.val.try_to_bits(Size::from_bytes(4 /* u32*/)).expect(&format!("kind not scalar: {:?}", idx_const));
- let (_lane_type, lane_count) = lane_type_and_count(fxcodegen_cx.tcx, base.layout());
+ let (_lane_type, lane_count) = lane_type_and_count(fx.codegen_cx.tcx, base.layout());
if idx >= lane_count.into() {
- fxcodegen_cx.tcx.sess.span_fatal(fx.mir.span, &format!("[simd_insert] idx {} >= lane_count {}", idx, lane_count));
+ fx.codegen_cx.tcx.sess.span_fatal(fx.mir.span, &format!("[simd_insert] idx {} >= lane_count {}", idx, lane_count));
}
ret.write_cvalue(fx, base);
let idx_const = if let Some(idx_const) = crate::constant::mir_operand_get_const_val(fx, idx) {
idx_const
} else {
- fxcodegen_cx.tcx.sess.span_warn(
+ fx.codegen_cx.tcx.sess.span_warn(
fx.mir.span,
"`#[rustc_arg_required_const(..)]` is not yet supported. Calling this function will panic.",
);
};
let idx = idx_const.val.try_to_bits(Size::from_bytes(4 /* u32*/)).expect(&format!("kind not scalar: {:?}", idx_const));
- let (_lane_type, lane_count) = lane_type_and_count(fxcodegen_cx.tcx, v.layout());
+ let (_lane_type, lane_count) = lane_type_and_count(fx.codegen_cx.tcx, v.layout());
if idx >= lane_count.into() {
- fxcodegen_cx.tcx.sess.span_fatal(fx.mir.span, &format!("[simd_extract] idx {} >= lane_count {}", idx, lane_count));
+ fx.codegen_cx.tcx.sess.span_fatal(fx.mir.span, &format!("[simd_extract] idx {} >= lane_count {}", idx, lane_count));
}
let ret_lane = v.value_field(fx, mir::Field::new(idx.try_into().unwrap()));
assert_eq!(a.layout(), c.layout());
let layout = a.layout();
- let (_lane_layout, lane_count) = lane_type_and_count(fxcodegen_cx.tcx, layout);
- let (ret_lane_layout, ret_lane_count) = lane_type_and_count(fxcodegen_cx.tcx, ret.layout());
+ let (_lane_layout, lane_count) = lane_type_and_count(fx.codegen_cx.tcx, layout);
+ let (ret_lane_layout, ret_lane_count) = lane_type_and_count(fx.codegen_cx.tcx, ret.layout());
assert_eq!(lane_count, ret_lane_count);
for lane in 0..lane_count {
}
fn finalize(mut self) -> (Module<B>, String, Option<DebugContext<'tcx>>, UnwindContext<'tcx>) {
- selfcodegen_cx.constants_cx.finalize(selfcodegen_cx.tcx, &mut selfcodegen_cx.module);
- (selfcodegen_cx.module, self.global_asm, self.debug_context, self.unwind_context)
+ self.codegen_cx.constants_cx.finalize(self.codegen_cx.tcx, &mut self.codegen_cx.module);
+ (self.codegen_cx.module, self.global_asm, self.debug_context, self.unwind_context)
}
}
let intcc = crate::num::bin_op_to_intcc(bin_op, signed).unwrap();
let val = fx.bcx.ins().icmp(intcc, lhs, rhs);
let val = fx.bcx.ins().bint(types::I8, val);
- CValue::by_val(val, fx.layout_of(fxcodegen_cx.tcx.types.bool))
+ CValue::by_val(val, fx.layout_of(fx.codegen_cx.tcx.types.bool))
}
pub(crate) fn codegen_binop<'tcx>(
let rhs = in_rhs.load_scalar(fx);
let (lhs, rhs) = if (bin_op == BinOp::Eq || bin_op == BinOp::Ne)
- && (in_lhs.layout().ty.kind == fxcodegen_cx.tcx.types.i8.kind
- || in_lhs.layout().ty.kind == fxcodegen_cx.tcx.types.i16.kind)
+ && (in_lhs.layout().ty.kind == fx.codegen_cx.tcx.types.i8.kind
+ || in_lhs.layout().ty.kind == fx.codegen_cx.tcx.types.i16.kind)
{
// FIXME(CraneStation/cranelift#896) icmp_imm.i8/i16 with eq/ne for signed ints is implemented wrong.
(
_ => unreachable!("{:?}({:?}, {:?})", bin_op, in_lhs, in_rhs),
};
- CValue::by_val(res, fx.layout_of(fxcodegen_cx.tcx.types.bool))
+ CValue::by_val(res, fx.layout_of(fx.codegen_cx.tcx.types.bool))
}
pub(crate) fn trans_int_binop<'tcx>(
// FIXME directly write to result place instead
let out_place = CPlace::new_stack_slot(
fx,
- fx.layout_of(fxcodegen_cx.tcx.mk_tup([in_lhs.layout().ty, fxcodegen_cx.tcx.types.bool].iter())),
+ fx.layout_of(fx.codegen_cx.tcx.mk_tup([in_lhs.layout().ty, fx.codegen_cx.tcx.types.bool].iter())),
);
let out_layout = out_place.layout();
out_place.write_cvalue(fx, CValue::by_val_pair(res, has_overflow, out_layout));
};
let val = fx.bcx.ins().fcmp(fltcc, lhs, rhs);
let val = fx.bcx.ins().bint(types::I8, val);
- return CValue::by_val(val, fx.layout_of(fxcodegen_cx.tcx.types.bool));
+ return CValue::by_val(val, fx.layout_of(fx.codegen_cx.tcx.types.bool));
}
_ => unreachable!("{:?}({:?}, {:?})", bin_op, in_lhs, in_rhs),
};
in_rhs: CValue<'tcx>,
) -> CValue<'tcx> {
let is_thin_ptr = in_lhs.layout().ty.builtin_deref(true).map(|TypeAndMut { ty, mutbl: _}| {
- !has_ptr_meta(fxcodegen_cx.tcx, ty)
+ !has_ptr_meta(fx.codegen_cx.tcx, ty)
}).unwrap_or(true);
if is_thin_ptr {
CValue::by_val(
fx.bcx.ins().bint(types::I8, res),
- fx.layout_of(fxcodegen_cx.tcx.types.bool),
+ fx.layout_of(fx.codegen_cx.tcx.types.bool),
)
}
}
fn codegen_print(fx: &mut FunctionCx<'_, '_, impl cranelift_module::Backend>, msg: &str) {
let puts = fx
- codegen_cx.module
+ .codegen_cx.module
.declare_function(
"puts",
Linkage::Import,
&Signature {
call_conv: CallConv::triple_default(fx.triple()),
- params: vec![AbiParam::new(pointer_ty(fxcodegen_cx.tcx))],
+ params: vec![AbiParam::new(pointer_ty(fx.codegen_cx.tcx))],
returns: vec![AbiParam::new(types::I32)],
},
)
.unwrap();
- let puts = fxcodegen_cx.module.declare_func_in_func(puts, &mut fx.bcx.func);
+ let puts = fx.codegen_cx.module.declare_func_in_func(puts, &mut fx.bcx.func);
#[cfg(debug_assertions)]
{
fx.add_comment(puts, "puts");
}
- let symbol_name = fxcodegen_cx.tcx.symbol_name(fx.instance);
+ let symbol_name = fx.codegen_cx.tcx.symbol_name(fx.instance);
let real_msg = format!("trap at {:?} ({}): {}\0", fx.instance, symbol_name, msg);
let msg_ptr = fx.anonymous_str("trap", &real_msg);
fx.bcx.ins().call(puts, &[msg_ptr]);
old_info: Option<Value>,
) -> Value {
let (source, target) =
- fxcodegen_cx.tcx
+ fx.codegen_cx.tcx
.struct_lockstep_tails_erasing_lifetimes(source, target, ParamEnv::reveal_all());
match (&source.kind, &target.kind) {
(&ty::Array(_, len), &ty::Slice(_)) => fx.bcx.ins().iconst(
fx.pointer_type,
- len.eval_usize(fxcodegen_cx.tcx, ParamEnv::reveal_all()) as i64,
+ len.eval_usize(fx.codegen_cx.tcx, ParamEnv::reveal_all()) as i64,
),
(&ty::Dynamic(..), &ty::Dynamic(..)) => {
// For now, upcasts are limited to changes in marker
let unaligned_offset = field_offset.bytes();
let (_, unsized_align) = crate::unsize::size_and_align_of_dst(fx, field_layout, extra);
- let one = fx.bcx.ins().iconst(pointer_ty(fxcodegen_cx.tcx), 1);
+ let one = fx.bcx.ins().iconst(pointer_ty(fx.codegen_cx.tcx), 1);
let align_sub_1 = fx.bcx.ins().isub(unsized_align, one);
let and_lhs = fx.bcx.ins().iadd_imm(align_sub_1, unaligned_offset as i64);
- let zero = fx.bcx.ins().iconst(pointer_ty(fxcodegen_cx.tcx), 0);
+ let zero = fx.bcx.ins().iconst(pointer_ty(fx.codegen_cx.tcx), 0);
let and_rhs = fx.bcx.ins().isub(zero, unsized_align);
let offset = fx.bcx.ins().band(and_lhs, and_rhs);
match self.0 {
CValueInner::ByRef(ptr, None) => {
let clif_ty = match layout.abi {
- Abi::Scalar(ref scalar) => scalar_to_clif_type(fxcodegen_cx.tcx, scalar.clone()),
+ Abi::Scalar(ref scalar) => scalar_to_clif_type(fx.codegen_cx.tcx, scalar.clone()),
Abi::Vector { ref element, count } => {
- scalar_to_clif_type(fxcodegen_cx.tcx, element.clone())
+ scalar_to_clif_type(fx.codegen_cx.tcx, element.clone())
.by(u16::try_from(count).unwrap()).unwrap()
}
_ => unreachable!("{:?}", layout.ty),
Abi::ScalarPair(a, b) => (a, b),
_ => unreachable!("load_scalar_pair({:?})", self),
};
- let b_offset = scalar_pair_calculate_b_offset(fxcodegen_cx.tcx, a_scalar, b_scalar);
- let clif_ty1 = scalar_to_clif_type(fxcodegen_cx.tcx, a_scalar.clone());
- let clif_ty2 = scalar_to_clif_type(fxcodegen_cx.tcx, b_scalar.clone());
+ let b_offset = scalar_pair_calculate_b_offset(fx.codegen_cx.tcx, a_scalar, b_scalar);
+ let clif_ty1 = scalar_to_clif_type(fx.codegen_cx.tcx, a_scalar.clone());
+ let clif_ty2 = scalar_to_clif_type(fx.codegen_cx.tcx, b_scalar.clone());
let val1 = ptr.load(fx, clif_ty1, MemFlags::new());
let val2 = ptr.offset(fx, b_offset).load(fx, clif_ty2, MemFlags::new());
(val1, val2)
assert_assignable(fx, a, b);
}
(ty::FnPtr(_), ty::FnPtr(_)) => {
- let from_sig = fxcodegen_cx.tcx.normalize_erasing_late_bound_regions(
+ let from_sig = fx.codegen_cx.tcx.normalize_erasing_late_bound_regions(
ParamEnv::reveal_all(),
- &from_ty.fn_sig(fxcodegen_cx.tcx),
+ &from_ty.fn_sig(fx.codegen_cx.tcx),
);
- let to_sig = fxcodegen_cx.tcx.normalize_erasing_late_bound_regions(
+ let to_sig = fx.codegen_cx.tcx.normalize_erasing_late_bound_regions(
ParamEnv::reveal_all(),
- &to_ty.fn_sig(fxcodegen_cx.tcx),
+ &to_ty.fn_sig(fx.codegen_cx.tcx),
);
assert_eq!(
from_sig, to_sig,
}
(ty::Dynamic(from_traits, _), ty::Dynamic(to_traits, _)) => {
let from_traits = fx
- codegen_cx.tcx
+ .codegen_cx.tcx
.normalize_erasing_late_bound_regions(ParamEnv::reveal_all(), from_traits);
let to_traits = fx
- codegen_cx.tcx
+ .codegen_cx.tcx
.normalize_erasing_late_bound_regions(ParamEnv::reveal_all(), to_traits);
assert_eq!(
from_traits, to_traits,
}
Abi::ScalarPair(ref a_scalar, ref b_scalar) => {
let (value, extra) = from.load_scalar_pair(fx);
- let b_offset = scalar_pair_calculate_b_offset(fxcodegen_cx.tcx, a_scalar, b_scalar);
+ let b_offset = scalar_pair_calculate_b_offset(fx.codegen_cx.tcx, a_scalar, b_scalar);
to_ptr.store(fx, value, MemFlags::new());
to_ptr.offset(fx, b_offset).store(fx, extra, MemFlags::new());
return;
let src_align = src_layout.align.abi.bytes() as u8;
let dst_align = dst_layout.align.abi.bytes() as u8;
fx.bcx.emit_small_memory_copy(
- fxcodegen_cx.module.target_config(),
+ fx.codegen_cx.module.target_config(),
to_addr,
from_addr,
size,
pub(crate) fn place_deref(self, fx: &mut FunctionCx<'_, 'tcx, impl Backend>) -> CPlace<'tcx> {
let inner_layout = fx.layout_of(self.layout().ty.builtin_deref(true).unwrap().ty);
- if has_ptr_meta(fxcodegen_cx.tcx, inner_layout.ty) {
+ if has_ptr_meta(fx.codegen_cx.tcx, inner_layout.ty) {
let (addr, extra) = self.to_cvalue(fx).load_scalar_pair(fx);
CPlace::for_ptr_with_extra(Pointer::new(addr), extra, inner_layout)
} else {
}
pub(crate) fn write_place_ref(self, fx: &mut FunctionCx<'_, 'tcx, impl Backend>, dest: CPlace<'tcx>) {
- if has_ptr_meta(fxcodegen_cx.tcx, self.layout().ty) {
+ if has_ptr_meta(fx.codegen_cx.tcx, self.layout().ty) {
let (ptr, extra) = self.to_ptr_maybe_unsized();
let ptr = CValue::by_val_pair(
ptr.get_addr(fx),
}
pub(crate) fn drop_fn_of_obj(fx: &mut FunctionCx<'_, '_, impl Backend>, vtable: Value) -> Value {
- let usize_size = fx.layout_of(fxcodegen_cx.tcx.types.usize).size.bytes() as usize;
+ let usize_size = fx.layout_of(fx.codegen_cx.tcx.types.usize).size.bytes() as usize;
fx.bcx.ins().load(
- pointer_ty(fxcodegen_cx.tcx),
+ pointer_ty(fx.codegen_cx.tcx),
vtable_memflags(),
vtable,
(DROP_FN_INDEX * usize_size) as i32,
}
pub(crate) fn size_of_obj(fx: &mut FunctionCx<'_, '_, impl Backend>, vtable: Value) -> Value {
- let usize_size = fx.layout_of(fxcodegen_cx.tcx.types.usize).size.bytes() as usize;
+ let usize_size = fx.layout_of(fx.codegen_cx.tcx.types.usize).size.bytes() as usize;
fx.bcx.ins().load(
- pointer_ty(fxcodegen_cx.tcx),
+ pointer_ty(fx.codegen_cx.tcx),
vtable_memflags(),
vtable,
(SIZE_INDEX * usize_size) as i32,
}
pub(crate) fn min_align_of_obj(fx: &mut FunctionCx<'_, '_, impl Backend>, vtable: Value) -> Value {
- let usize_size = fx.layout_of(fxcodegen_cx.tcx.types.usize).size.bytes() as usize;
+ let usize_size = fx.layout_of(fx.codegen_cx.tcx.types.usize).size.bytes() as usize;
fx.bcx.ins().load(
- pointer_ty(fxcodegen_cx.tcx),
+ pointer_ty(fx.codegen_cx.tcx),
vtable_memflags(),
vtable,
(ALIGN_INDEX * usize_size) as i32,
)
};
- let usize_size = fx.layout_of(fxcodegen_cx.tcx.types.usize).size.bytes();
+ let usize_size = fx.layout_of(fx.codegen_cx.tcx.types.usize).size.bytes();
let func_ref = fx.bcx.ins().load(
- pointer_ty(fxcodegen_cx.tcx),
+ pointer_ty(fx.codegen_cx.tcx),
vtable_memflags(),
vtable,
((idx + 3) * usize_size as usize) as i32,
data_id
};
- let local_data_id = fxcodegen_cx.module.declare_data_in_func(data_id, &mut fx.bcx.func);
+ let local_data_id = fx.codegen_cx.module.declare_data_in_func(data_id, &mut fx.bcx.func);
fx.bcx.ins().global_value(fx.pointer_type, local_data_id)
}
layout: TyAndLayout<'tcx>,
trait_ref: Option<ty::PolyExistentialTraitRef<'tcx>>,
) -> DataId {
- let tcx = fxcodegen_cx.tcx;
- let usize_size = fx.layout_of(fxcodegen_cx.tcx.types.usize).size.bytes() as usize;
+ let tcx = fx.codegen_cx.tcx;
+ let usize_size = fx.layout_of(fx.codegen_cx.tcx.types.usize).size.bytes() as usize;
let drop_in_place_fn =
- import_function(tcx, fxcodegen_cx.module, Instance::resolve_drop_in_place(tcx, layout.ty).polymorphize(fxcodegen_cx.tcx));
+ import_function(tcx, fx.codegen_cx.module, Instance::resolve_drop_in_place(tcx, layout.ty).polymorphize(fx.codegen_cx.tcx));
let mut components: Vec<_> = vec![Some(drop_in_place_fn), None, None];
opt_mth.map_or(None, |(def_id, substs)| {
Some(import_function(
tcx,
- fxcodegen_cx.module,
- Instance::resolve_for_vtable(tcx, ParamEnv::reveal_all(), def_id, substs).unwrap().polymorphize(fxcodegen_cx.tcx),
+ fx.codegen_cx.module,
+ Instance::resolve_for_vtable(tcx, ParamEnv::reveal_all(), def_id, substs).unwrap().polymorphize(fx.codegen_cx.tcx),
))
})
});
.collect::<Vec<u8>>()
.into_boxed_slice();
- write_usize(fxcodegen_cx.tcx, &mut data, SIZE_INDEX, layout.size.bytes());
- write_usize(fxcodegen_cx.tcx, &mut data, ALIGN_INDEX, layout.align.abi.bytes());
+ write_usize(fx.codegen_cx.tcx, &mut data, SIZE_INDEX, layout.size.bytes());
+ write_usize(fx.codegen_cx.tcx, &mut data, ALIGN_INDEX, layout.align.abi.bytes());
data_ctx.define(data);
for (i, component) in components.into_iter().enumerate() {
if let Some(func_id) = component {
- let func_ref = fxcodegen_cx.module.declare_func_in_data(func_id, &mut data_ctx);
+ let func_ref = fx.codegen_cx.module.declare_func_in_data(func_id, &mut data_ctx);
data_ctx.write_function_addr((i * usize_size) as u32, func_ref);
}
}
let data_id = fx
- codegen_cx.module
+ .codegen_cx.module
.declare_data(
&format!(
"__vtable.{}.for.{:?}.{}",
false,
false,
Some(
- fxcodegen_cx.tcx
+ fx.codegen_cx.tcx
.data_layout
.pointer_align
.pref
)
.unwrap();
- fxcodegen_cx.module.define_data(data_id, &data_ctx).unwrap();
+ fx.codegen_cx.module.define_data(data_id, &data_ctx).unwrap();
data_id
}