//! Handling of everything related to the calling convention. Also fills `fx.local_map`.
-#[cfg(debug_assertions)]
mod comments;
mod pass_mode;
mod returning;
use rustc_target::spec::abi::Abi;
use cranelift_codegen::ir::AbiParam;
+use smallvec::smallvec;
use self::pass_mode::*;
use crate::prelude::*;
pub(crate) use self::returning::{can_return_to_ssa_var, codegen_return};
-// FIXME remove
-// Copied from https://github.com/rust-lang/rust/blob/f52c72948aa1dd718cc1f168d21c91c584c0a662/src/librustc_middle/ty/layout.rs#L2301
-#[rustfmt::skip]
-pub(crate) fn fn_sig_for_fn_abi<'tcx>(tcx: TyCtxt<'tcx>, instance: Instance<'tcx>) -> ty::PolyFnSig<'tcx> {
- use rustc_middle::ty::subst::Subst;
-
- // FIXME(davidtwco,eddyb): A `ParamEnv` should be passed through to this function.
- let ty = instance.ty(tcx, ty::ParamEnv::reveal_all());
- match *ty.kind() {
- ty::FnDef(..) => {
- // HACK(davidtwco,eddyb): This is a workaround for polymorphization considering
- // parameters unused if they show up in the signature, but not in the `mir::Body`
- // (i.e. due to being inside a projection that got normalized, see
- // `src/test/ui/polymorphization/normalized_sig_types.rs`), and codegen not keeping
- // track of a polymorphization `ParamEnv` to allow normalizing later.
- let mut sig = match *ty.kind() {
- ty::FnDef(def_id, substs) => tcx
- .normalize_erasing_regions(tcx.param_env(def_id), tcx.fn_sig(def_id))
- .subst(tcx, substs),
- _ => unreachable!(),
- };
-
- if let ty::InstanceDef::VtableShim(..) = instance.def {
- // Modify `fn(self, ...)` to `fn(self: *mut Self, ...)`.
- sig = sig.map_bound(|mut sig| {
- let mut inputs_and_output = sig.inputs_and_output.to_vec();
- inputs_and_output[0] = tcx.mk_mut_ptr(inputs_and_output[0]);
- sig.inputs_and_output = tcx.intern_type_list(&inputs_and_output);
- sig
- });
- }
- sig
- }
- ty::Closure(def_id, substs) => {
- let sig = substs.as_closure().sig();
-
- let env_ty = tcx.closure_env_ty(def_id, substs).unwrap();
- sig.map_bound(|sig| {
- tcx.mk_fn_sig(
- std::iter::once(env_ty.skip_binder()).chain(sig.inputs().iter().cloned()),
- sig.output(),
- sig.c_variadic,
- sig.unsafety,
- sig.abi,
- )
- })
- }
- ty::Generator(_, substs, _) => {
- let sig = substs.as_generator().poly_sig();
-
- let env_region = ty::ReLateBound(ty::INNERMOST, ty::BoundRegion { kind: ty::BrEnv });
- let env_ty = tcx.mk_mut_ref(tcx.mk_region(env_region), ty);
-
- let pin_did = tcx.require_lang_item(rustc_hir::LangItem::Pin, None);
- let pin_adt_ref = tcx.adt_def(pin_did);
- let pin_substs = tcx.intern_substs(&[env_ty.into()]);
- let env_ty = tcx.mk_adt(pin_adt_ref, pin_substs);
-
- sig.map_bound(|sig| {
- let state_did = tcx.require_lang_item(rustc_hir::LangItem::GeneratorState, None);
- let state_adt_ref = tcx.adt_def(state_did);
- let state_substs =
- tcx.intern_substs(&[sig.yield_ty.into(), sig.return_ty.into()]);
- let ret_ty = tcx.mk_adt(state_adt_ref, state_substs);
-
- tcx.mk_fn_sig(
- [env_ty, sig.resume_ty].iter(),
- &ret_ty,
- false,
- rustc_hir::Unsafety::Normal,
- rustc_target::spec::abi::Abi::Rust,
- )
- })
- }
- _ => bug!("unexpected type {:?} in Instance::fn_sig", ty),
- }
-}
-
fn clif_sig_from_fn_abi<'tcx>(
tcx: TyCtxt<'tcx>,
triple: &target_lexicon::Triple,
Conv::X86_64SysV => CallConv::SystemV,
Conv::X86_64Win64 => CallConv::WindowsFastcall,
Conv::ArmAapcs
+ | Conv::CCmseNonSecureCall
| Conv::Msp430Intr
| Conv::PtxKernel
| Conv::X86Fastcall
| Conv::X86VectorCall
| Conv::AmdGpuKernel
| Conv::AvrInterrupt
- | Conv::AvrNonBlockingInterrupt => {
- todo!("{:?}", fn_abi.conv)
- }
+ | Conv::AvrNonBlockingInterrupt => todo!("{:?}", fn_abi.conv),
};
- let inputs = fn_abi
- .args
- .iter()
- .map(|arg_abi| arg_abi.get_abi_param(tcx).into_iter())
- .flatten();
+ let inputs = fn_abi.args.iter().map(|arg_abi| arg_abi.get_abi_param(tcx).into_iter()).flatten();
let (return_ptr, returns) = fn_abi.ret.get_abi_return(tcx);
// Sometimes the first param is a pointer to the place where the return value needs to be stored.
let params: Vec<_> = return_ptr.into_iter().chain(inputs).collect();
- Signature {
- params,
- returns,
- call_conv,
- }
+ Signature { params, returns, call_conv }
}
pub(crate) fn get_function_sig<'tcx>(
tcx: TyCtxt<'tcx>,
triple: &target_lexicon::Triple,
inst: Instance<'tcx>,
- support_vararg: bool,
) -> Signature {
assert!(!inst.substs.needs_infer());
- let fn_sig = tcx
- .normalize_erasing_late_bound_regions(ParamEnv::reveal_all(), fn_sig_for_fn_abi(tcx, inst));
- if fn_sig.c_variadic && !support_vararg {
- tcx.sess.span_fatal(
- tcx.def_span(inst.def_id()),
- "Variadic function definitions are not yet supported",
- );
- }
-
- clif_sig_from_fn_abi(
- tcx,
- triple,
- &FnAbi::of_instance(&RevealAllLayoutCx(tcx), inst, &[]),
- )
+ clif_sig_from_fn_abi(tcx, triple, &FnAbi::of_instance(&RevealAllLayoutCx(tcx), inst, &[]))
}
/// Instance must be monomorphized
pub(crate) fn import_function<'tcx>(
tcx: TyCtxt<'tcx>,
- module: &mut impl Module,
+ module: &mut dyn Module,
inst: Instance<'tcx>,
) -> FuncId {
- let name = tcx.symbol_name(inst).name.to_string();
- let sig = get_function_sig(tcx, module.isa().triple(), inst, true);
- module
- .declare_function(&name, Linkage::Import, &sig)
- .unwrap()
+ let name = tcx.symbol_name(inst).name;
+ let sig = get_function_sig(tcx, module.isa().triple(), inst);
+ module.declare_function(name, Linkage::Import, &sig).unwrap()
}
-impl<'tcx, M: Module> FunctionCx<'_, 'tcx, M> {
+impl<'tcx> FunctionCx<'_, '_, 'tcx> {
/// Instance must be monomorphized
pub(crate) fn get_function_ref(&mut self, inst: Instance<'tcx>) -> FuncRef {
- let func_id = import_function(self.tcx, &mut self.cx.module, inst);
- let func_ref = self
- .cx
- .module
- .declare_func_in_func(func_id, &mut self.bcx.func);
+ let func_id = import_function(self.tcx, self.module, inst);
+ let func_ref = self.module.declare_func_in_func(func_id, &mut self.bcx.func);
- #[cfg(debug_assertions)]
- self.add_comment(func_ref, format!("{:?}", inst));
+ if self.clif_comments.enabled() {
+ self.add_comment(func_ref, format!("{:?}", inst));
+ }
func_ref
}
pub(crate) fn lib_call(
&mut self,
name: &str,
- input_tys: Vec<types::Type>,
- output_tys: Vec<types::Type>,
+ params: Vec<AbiParam>,
+ returns: Vec<AbiParam>,
args: &[Value],
) -> &[Value] {
- let sig = Signature {
- params: input_tys.iter().cloned().map(AbiParam::new).collect(),
- returns: output_tys.iter().cloned().map(AbiParam::new).collect(),
- call_conv: CallConv::triple_default(self.triple()),
- };
- let func_id = self
- .cx
- .module
- .declare_function(&name, Linkage::Import, &sig)
- .unwrap();
- let func_ref = self
- .cx
- .module
- .declare_func_in_func(func_id, &mut self.bcx.func);
+ let sig = Signature { params, returns, call_conv: CallConv::triple_default(self.triple()) };
+ let func_id = self.module.declare_function(name, Linkage::Import, &sig).unwrap();
+ let func_ref = self.module.declare_func_in_func(func_id, &mut self.bcx.func);
let call_inst = self.bcx.ins().call(func_ref, args);
- #[cfg(debug_assertions)]
- {
+ if self.clif_comments.enabled() {
self.add_comment(call_inst, format!("easy_call {}", name));
}
let results = self.bcx.inst_results(call_inst);
let (input_tys, args): (Vec<_>, Vec<_>) = args
.iter()
.map(|arg| {
- (
- self.clif_type(arg.layout().ty).unwrap(),
- arg.load_scalar(self),
- )
+ (AbiParam::new(self.clif_type(arg.layout().ty).unwrap()), arg.load_scalar(self))
})
.unzip();
let return_layout = self.layout_of(return_ty);
let return_tys = if let ty::Tuple(tup) = return_ty.kind() {
- tup.types().map(|ty| self.clif_type(ty).unwrap()).collect()
+ tup.types().map(|ty| AbiParam::new(self.clif_type(ty).unwrap())).collect()
} else {
- vec![self.clif_type(return_ty).unwrap()]
+ vec![AbiParam::new(self.clif_type(return_ty).unwrap())]
};
let ret_vals = self.lib_call(name, input_tys, return_tys, &args);
match *ret_vals {
/// Make a [`CPlace`] capable of holding a value of the specified type.
fn make_local_place<'tcx>(
- fx: &mut FunctionCx<'_, 'tcx, impl Module>,
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
local: Local,
layout: TyAndLayout<'tcx>,
is_ssa: bool,
CPlace::new_stack_slot(fx, layout)
};
- #[cfg(debug_assertions)]
self::comments::add_local_place_comments(fx, place, local);
place
}
-pub(crate) fn codegen_fn_prelude<'tcx>(
- fx: &mut FunctionCx<'_, 'tcx, impl Module>,
- start_block: Block,
-) {
+pub(crate) fn codegen_fn_prelude<'tcx>(fx: &mut FunctionCx<'_, '_, 'tcx>, start_block: Block) {
+ fx.bcx.append_block_params_for_function_params(start_block);
+
+ fx.bcx.switch_to_block(start_block);
+ fx.bcx.ins().nop();
+
let ssa_analyzed = crate::analyze::analyze(fx);
- #[cfg(debug_assertions)]
self::comments::add_args_header_comment(fx);
- let ret_place = self::returning::codegen_return_param(fx, &ssa_analyzed, start_block);
+ let mut block_params_iter = fx.bcx.func.dfg.block_params(start_block).to_vec().into_iter();
+ let ret_place =
+ self::returning::codegen_return_param(fx, &ssa_analyzed, &mut block_params_iter);
assert_eq!(fx.local_map.push(ret_place), RETURN_PLACE);
// None means pass_mode == NoPass
let mut params = Vec::new();
for (i, _arg_ty) in tupled_arg_tys.types().enumerate() {
let arg_abi = arg_abis_iter.next().unwrap();
- let param = cvalue_for_param(fx, start_block, Some(local), Some(i), arg_abi);
+ let param =
+ cvalue_for_param(fx, Some(local), Some(i), arg_abi, &mut block_params_iter);
params.push(param);
}
(local, ArgKind::Spread(params), arg_ty)
} else {
let arg_abi = arg_abis_iter.next().unwrap();
- let param = cvalue_for_param(fx, start_block, Some(local), None, arg_abi);
+ let param =
+ cvalue_for_param(fx, Some(local), None, arg_abi, &mut block_params_iter);
(local, ArgKind::Normal(param), arg_ty)
}
})
if fx.instance.def.requires_caller_location(fx.tcx) {
// Store caller location for `#[track_caller]`.
let arg_abi = arg_abis_iter.next().unwrap();
- fx.caller_location = Some(cvalue_for_param(fx, start_block, None, None, arg_abi).unwrap());
+ fx.caller_location =
+ Some(cvalue_for_param(fx, None, None, arg_abi, &mut block_params_iter).unwrap());
}
assert!(arg_abis_iter.next().is_none(), "ArgAbi left behind");
fx.fn_abi = Some(fn_abi);
+ assert!(block_params_iter.next().is_none(), "arg_value left behind");
- fx.bcx.switch_to_block(start_block);
- fx.bcx.ins().nop();
-
- #[cfg(debug_assertions)]
self::comments::add_locals_header_comment(fx);
for (local, arg_kind, ty) in func_params {
if let Some((addr, meta)) = val.try_to_ptr() {
let local_decl = &fx.mir.local_decls[local];
// v this ! is important
- let internally_mutable = !val.layout().ty.is_freeze(
- fx.tcx.at(local_decl.source_info.span),
- ParamEnv::reveal_all(),
- );
+ let internally_mutable = !val
+ .layout()
+ .ty
+ .is_freeze(fx.tcx.at(local_decl.source_info.span), ParamEnv::reveal_all());
if local_decl.mutability == mir::Mutability::Not && !internally_mutable {
                    // We won't mutate this argument, so it is fine to borrow the backing storage
                    // of this argument, to prevent a copy.
CPlace::for_ptr(addr, val.layout())
};
- #[cfg(debug_assertions)]
self::comments::add_local_place_comments(fx, place, local);
assert_eq!(fx.local_map.push(place), local);
ArgKind::Spread(params) => {
for (i, param) in params.into_iter().enumerate() {
if let Some(param) = param {
- place
- .place_field(fx, mir::Field::new(i))
- .write_cvalue(fx, param);
+ place.place_field(fx, mir::Field::new(i)).write_cvalue(fx, param);
}
}
}
assert_eq!(fx.local_map.push(place), local);
}
- fx.bcx
- .ins()
- .jump(*fx.block_map.get(START_BLOCK).unwrap(), &[]);
+ fx.bcx.ins().jump(*fx.block_map.get(START_BLOCK).unwrap(), &[]);
}
pub(crate) fn codegen_terminator_call<'tcx>(
- fx: &mut FunctionCx<'_, 'tcx, impl Module>,
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
span: Span,
- current_block: Block,
func: &Operand<'tcx>,
args: &[Operand<'tcx>],
destination: Option<(Place<'tcx>, BasicBlock)>,
) {
let fn_ty = fx.monomorphize(func.ty(fx.mir, fx.tcx));
- let fn_sig = fx
- .tcx
- .normalize_erasing_late_bound_regions(ParamEnv::reveal_all(), fn_ty.fn_sig(fx.tcx));
+ let fn_sig =
+ fx.tcx.normalize_erasing_late_bound_regions(ParamEnv::reveal_all(), fn_ty.fn_sig(fx.tcx));
let destination = destination.map(|(place, bb)| (codegen_place(fx, place), bb));
let fn_abi = if let Some(instance) = instance {
FnAbi::of_instance(&RevealAllLayoutCx(fx.tcx), instance, &extra_args)
} else {
- FnAbi::of_fn_ptr(
- &RevealAllLayoutCx(fx.tcx),
- fn_ty.fn_sig(fx.tcx),
- &extra_args,
- )
+ FnAbi::of_fn_ptr(&RevealAllLayoutCx(fx.tcx), fn_ty.fn_sig(fx.tcx), &extra_args)
};
let is_cold = instance
- .map(|inst| {
- fx.tcx
- .codegen_fn_attrs(inst.def_id())
- .flags
- .contains(CodegenFnAttrFlags::COLD)
- })
+ .map(|inst| fx.tcx.codegen_fn_attrs(inst.def_id()).flags.contains(CodegenFnAttrFlags::COLD))
.unwrap_or(false);
if is_cold {
- fx.cold_blocks.insert(current_block);
+ // FIXME Mark current_block block as cold once Cranelift supports it
}
// Unpack arguments tuple for closures
}
args
} else {
- args.iter()
- .map(|arg| codegen_operand(fx, arg))
- .collect::<Vec<_>>()
+ args.iter().map(|arg| codegen_operand(fx, arg)).collect::<Vec<_>>()
};
// | indirect call target
// v v
let (func_ref, first_arg) = match instance {
// Trait object call
- Some(Instance {
- def: InstanceDef::Virtual(_, idx),
- ..
- }) => {
- #[cfg(debug_assertions)]
- {
+ Some(Instance { def: InstanceDef::Virtual(_, idx), .. }) => {
+ if fx.clif_comments.enabled() {
let nop_inst = fx.bcx.ins().nop();
fx.add_comment(
nop_inst,
);
}
let (ptr, method) = crate::vtable::get_ptr_and_method_ref(fx, args[0], idx);
- (Some(method), Single(ptr))
+ (Some(method), smallvec![ptr])
}
// Normal call
None,
args.get(0)
.map(|arg| adjust_arg_for_abi(fx, *arg, &fn_abi.args[0]))
- .unwrap_or(Empty),
+ .unwrap_or(smallvec![]),
),
// Indirect call
None => {
- #[cfg(debug_assertions)]
- {
+ if fx.clif_comments.enabled() {
let nop_inst = fx.bcx.ins().nop();
fx.add_comment(nop_inst, "indirect call");
}
Some(func),
args.get(0)
.map(|arg| adjust_arg_for_abi(fx, *arg, &fn_abi.args[0]))
- .unwrap_or(Empty),
+ .unwrap_or(smallvec![]),
)
}
};
)
.collect::<Vec<_>>();
- if instance
- .map(|inst| inst.def.requires_caller_location(fx.tcx))
- .unwrap_or(false)
- {
+ if instance.map(|inst| inst.def.requires_caller_location(fx.tcx)).unwrap_or(false) {
// Pass the caller location for `#[track_caller]`.
let caller_location = fx.get_caller_location(span);
call_args.extend(
// FIXME find a cleaner way to support varargs
if fn_sig.c_variadic {
- if fn_sig.abi != Abi::C {
- fx.tcx.sess.span_fatal(
- span,
- &format!("Variadic call for non-C abi {:?}", fn_sig.abi),
- );
+ if !matches!(fn_sig.abi, Abi::C { .. }) {
+ fx.tcx.sess.span_fatal(span, &format!("Variadic call for non-C abi {:?}", fn_sig.abi));
}
let sig_ref = fx.bcx.func.dfg.call_signature(call_inst).unwrap();
let abi_params = call_args
let ty = fx.bcx.func.dfg.value_type(arg);
if !ty.is_int() {
// FIXME set %al to upperbound on float args once floats are supported
- fx.tcx
- .sess
- .span_fatal(span, &format!("Non int ty {:?} for variadic call", ty));
+ fx.tcx.sess.span_fatal(span, &format!("Non int ty {:?} for variadic call", ty));
}
AbiParam::new(ty)
})
}
pub(crate) fn codegen_drop<'tcx>(
- fx: &mut FunctionCx<'_, 'tcx, impl Module>,
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
span: Span,
drop_place: CPlace<'tcx>,
) {
fx,
fx.layout_of(fx.tcx.mk_ref(
&ty::RegionKind::ReErased,
- TypeAndMut {
- ty,
- mutbl: crate::rustc_hir::Mutability::Mut,
- },
+ TypeAndMut { ty, mutbl: crate::rustc_hir::Mutability::Mut },
)),
);
let arg_value = adjust_arg_for_abi(fx, arg_value, &fn_abi.args[0]);