4 use rustc_target::spec::abi::Abi;
// Builds a Cranelift `Signature` from a monomorphized Rust fn type.
// NOTE(review): this chunk is a sampled excerpt — interior lines (the
// struct-literal tail, `returns`, and some match arms) are not visible here.
8 pub fn cton_sig_from_fn_ty<'a, 'tcx: 'a>(
9 tcx: TyCtxt<'a, 'tcx, 'tcx>,
// Resolve the callable type to a concrete FnSig (see ty_fn_sig below).
12 let sig = ty_fn_sig(tcx, fn_ty);
13 assert!(!sig.variadic, "Variadic function are not yet supported");
// Map the Rust-level ABI to a Cranelift calling convention and collect
// the input types; `_output` is unused because returns go via out-pointer.
14 let (call_conv, inputs, _output): (CallConv, Vec<Ty>, Ty) = match sig.abi {
15 Abi::Rust => (CallConv::Fast, sig.inputs().to_vec(), sig.output()),
16 Abi::C => (CallConv::SystemV, sig.inputs().to_vec(), sig.output()),
19 "rust-call sig: {:?} inputs: {:?} output: {:?}",
// "rust-call" ABI: exactly two inputs — self plus a tuple of the real
// arguments; the tuple is flattened into individual parameters here.
24 assert_eq!(sig.inputs().len(), 2);
25 let extra_args = match sig.inputs().last().unwrap().sty {
26 ty::TyTuple(ref tupled_arguments) => tupled_arguments,
27 _ => bug!("argument to function with \"rust-call\" ABI is not a tuple"),
29 let mut inputs: Vec<Ty> = vec![sig.inputs()[0]];
30 inputs.extend(extra_args.into_iter());
31 (CallConv::Fast, inputs, sig.output())
33 Abi::System => bug!("system abi should be selected elsewhere"),
34 Abi::RustIntrinsic => (CallConv::SystemV, sig.inputs().to_vec(), sig.output()),
35 _ => unimplemented!("unsupported abi {:?}", sig.abi),
// The first parameter is always an I64 pointer to the place where the
// return value must be written (return-by-out-pointer convention).
38 params: Some(types::I64).into_iter() // First param is place to put return val
39 .chain(inputs.into_iter().map(|ty| {
40 let cton_ty = cton_type_from_ty(tcx, ty);
41 if let Some(cton_ty) = cton_ty {
// Non-scalar arguments under the "C" ABI are not supported yet.
44 if sig.abi == Abi::C {
45 unimplemented!("Non scalars are not yet supported for \"C\" abi");
50 .map(AbiParam::new).collect(),
// Extracts the `FnSig` of a callable type. Fn pointers use their own
// signature; closures and generators get a synthesized signature with the
// environment (self/state) prepended as an implicit first argument.
// NOTE(review): sampled excerpt — some match arms / argument lists are
// missing from view (e.g. the TyFnDef arm and mk_fn_sig tails).
57 fn ty_fn_sig<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, ty: Ty<'tcx>) -> ty::FnSig<'tcx> {
58 let sig = match ty.sty {
60 // Shims currently have type TyFnPtr. Not sure this should remain.
61 ty::TyFnPtr(_) => ty.fn_sig(tcx),
62 ty::TyClosure(def_id, substs) => {
63 let sig = substs.closure_sig(def_id, tcx);
// Prepend the closure environment type as the first input.
65 let env_ty = tcx.closure_env_ty(def_id, substs).unwrap();
66 sig.map_bound(|sig| tcx.mk_fn_sig(
67 iter::once(*env_ty.skip_binder()).chain(sig.inputs().iter().cloned()),
74 ty::TyGenerator(def_id, substs, _) => {
75 let sig = substs.poly_sig(def_id, tcx);
// Generators are resumed through `&mut self`; build that env type.
77 let env_region = ty::ReLateBound(ty::INNERMOST, ty::BrEnv);
78 let env_ty = tcx.mk_mut_ref(tcx.mk_region(env_region), ty);
// The return type is the GeneratorState lang item instantiated with
// the generator's yield/return types.
81 let state_did = tcx.lang_items().gen_state().unwrap();
82 let state_adt_ref = tcx.adt_def(state_did);
83 let state_substs = tcx.intern_substs(&[
87 let ret_ty = tcx.mk_adt(state_adt_ref, state_substs);
89 tcx.mk_fn_sig(iter::once(env_ty),
92 hir::Unsafety::Normal,
97 _ => bug!("unexpected type {:?} to ty_fn_sig", ty)
// Erase late-bound regions and normalize for codegen (reveal_all).
99 tcx.normalize_erasing_late_bound_regions(ParamEnv::reveal_all(), &sig)
// Helper methods on the per-function codegen context.
// NOTE(review): sampled excerpt — method signatures for the lib-call and
// easy-call helpers are partially missing from view.
102 impl<'a, 'tcx: 'a> FunctionCx<'a, 'tcx> {
103 /// Instance must be monomorphized
// Declares the instance's symbol as an imported function in the module
// and returns a FuncRef usable inside the current Cranelift function.
104 pub fn get_function_ref(&mut self, inst: Instance<'tcx>) -> FuncRef {
105 assert!(!inst.substs.needs_infer() && !inst.substs.has_param_types());
106 let fn_ty = inst.ty(self.tcx);
107 let sig = cton_sig_from_fn_ty(self.tcx, fn_ty);
// Symbol name is derived from the instance's def-path (no hashes).
108 let def_path_based_names =
109 ::rustc_mir::monomorphize::item::DefPathBasedNames::new(self.tcx, false, false);
110 let mut name = String::new();
111 def_path_based_names.push_instance_as_string(inst, &mut name);
114 .declare_function(&name, Linkage::Import, &sig)
117 .declare_func_in_func(func_id, &mut self.bcx.func)
// Raw library-call helper: takes Cranelift value types directly and
// returns the single result value, if the callee produces one.
123 input_tys: Vec<types::Type>,
124 output_ty: Option<types::Type>,
// Library calls always use the System V calling convention; a missing
// output type is encoded as VOID in the signature.
127 let sig = Signature {
128 params: input_tys.iter().cloned().map(AbiParam::new).collect(),
129 returns: vec![AbiParam::new(output_ty.unwrap_or(types::VOID))],
130 call_conv: CallConv::SystemV,
131 argument_bytes: None,
135 .declare_function(&name, Linkage::Import, &sig)
139 .declare_func_in_func(func_id, &mut self.bcx.func);
140 let call_inst = self.bcx.ins().call(func_ref, args);
141 if output_ty.is_none() {
144 let results = self.bcx.inst_results(call_inst);
145 assert_eq!(results.len(), 1);
// CValue-level wrapper around the lib-call helper: loads each argument
// as a scalar value and pairs it with its Cranelift type.
152 args: &[CValue<'tcx>],
155 let (input_tys, args): (Vec<_>, Vec<_>) = args
159 self.cton_type(arg.layout().ty).unwrap(),
160 arg.load_value(self),
163 let return_layout = self.layout_of(return_ty);
// A tuple return type is only allowed when it is the empty tuple `()`.
164 let return_ty = if let TypeVariants::TyTuple(tup) = return_ty.sty {
166 bug!("easy_call( (...) -> <non empty tuple> ) is not allowed");
170 Some(self.cton_type(return_ty).unwrap())
172 if let Some(val) = self.lib_call(name, input_tys, return_ty, &args) {
173 CValue::ByVal(val, return_layout)
// No scalar result: hand back a dummy ByRef (null address) — presumably
// only reached for the unit return case; TODO confirm.
175 CValue::ByRef(self.bcx.ins().iconst(types::I64, 0), return_layout)
// Signature of the function currently being compiled.
179 fn self_sig(&self) -> FnSig<'tcx> {
180 ty_fn_sig(self.tcx, self.instance.ty(self.tcx))
// Monomorphized return type of the function currently being compiled.
183 fn return_type(&self) -> Ty<'tcx> {
184 self.self_sig().output()
// Emits the function prologue: binds the return-place out-pointer,
// materializes EBB parameters for each MIR argument (reassembling the
// "rust-call" spread tuple), and allocates every local either as an SSA
// Variable or as an explicit stack slot, based on the analyze pass.
188 pub fn codegen_fn_prelude<'a, 'tcx: 'a>(fx: &mut FunctionCx<'a, 'tcx>, start_ebb: Ebb) {
189 let ssa_analyzed = crate::analyze::analyze(fx);
// NOTE(review): emits a compiler warning with the SSA analysis result for
// every function — looks like a temporary debugging aid.
190 fx.tcx.sess.warn(&format!("ssa {:?}", ssa_analyzed));
192 match fx.self_sig().abi {
193 Abi::Rust | Abi::RustCall => {}
194 _ => unimplemented!("declared function with non \"rust\" or \"rust-call\" abi"),
// First EBB parameter is the out-pointer for the return value, matching
// the signature built in cton_sig_from_fn_ty.
197 let ret_param = fx.bcx.append_ebb_param(start_ebb, types::I64);
204 let func_params = fx.mir.args_iter().map(|local| {
205 let arg_ty = fx.monomorphize(&fx.mir.local_decls[local].ty);
207 // Adapted from https://github.com/rust-lang/rust/blob/145155dc96757002c7b2e9de8489416e2fdbbd57/src/librustc_codegen_llvm/mir/mod.rs#L442-L482
208 if Some(local) == fx.mir.spread_arg {
209 // This argument (e.g. the last argument in the "rust-call" ABI)
210 // is a tuple that was spread at the ABI level and now we have
211 // to reconstruct it into a tuple local variable, from multiple
212 // individual function arguments.
214 let tupled_arg_tys = match arg_ty.sty {
215 ty::TyTuple(ref tys) => tys,
216 _ => bug!("spread argument isn't a tuple?! but {:?}", arg_ty),
// One EBB parameter per tuple element; types without a scalar Cranelift
// representation fall back to I64 (passed by reference).
219 let mut ebb_params = Vec::new();
220 for arg_ty in tupled_arg_tys.iter() {
221 let cton_type = fx.cton_type(arg_ty).unwrap_or(types::I64);
222 ebb_params.push(fx.bcx.append_ebb_param(start_ebb, cton_type));
225 (local, ArgKind::Spread(ebb_params), arg_ty)
227 let cton_type = fx.cton_type(arg_ty).unwrap_or(types::I64);
228 (local, ArgKind::Normal(fx.bcx.append_ebb_param(start_ebb, cton_type)), arg_ty)
230 }).collect::<Vec<(Local, ArgKind, Ty)>>();
// RETURN_PLACE aliases the caller-provided out-pointer.
232 let ret_layout = fx.layout_of(fx.return_type());
234 .insert(RETURN_PLACE, CPlace::Addr(ret_param, ret_layout));
236 for (local, arg_kind, ty) in func_params {
237 let layout = fx.layout_of(ty);
// SSA-eligible scalar arguments become Cranelift Variables...
239 if let ArgKind::Normal(ebb_param) = arg_kind {
243 .contains(crate::analyze::Flags::NOT_SSA)
245 let var = Variable(local);
246 fx.bcx.declare_var(var, fx.cton_type(ty).unwrap());
247 fx.bcx.def_var(var, ebb_param);
248 fx.local_map.insert(local, CPlace::Var(var, layout));
// ...everything else gets an explicit stack slot sized from the layout.
253 let stack_slot = fx.bcx.create_stack_slot(StackSlotData {
254 kind: StackSlotKind::ExplicitSlot,
255 size: layout.size.bytes() as u32,
259 let place = CPlace::from_stack_slot(fx, stack_slot, ty);
// Scalar args arrive by value; non-scalars arrive by reference.
262 ArgKind::Normal(ebb_param) => {
263 if fx.cton_type(ty).is_some() {
264 place.write_cvalue(fx, CValue::ByVal(ebb_param, place.layout()));
266 place.write_cvalue(fx, CValue::ByRef(ebb_param, place.layout()));
// Reassemble the spread tuple field by field into the stack slot.
269 ArgKind::Spread(ebb_params) => {
270 for (i, ebb_param) in ebb_params.into_iter().enumerate() {
271 let sub_place = place.place_field(fx, mir::Field::new(i));
272 if fx.cton_type(sub_place.layout().ty).is_some() {
273 sub_place.write_cvalue(fx, CValue::ByVal(ebb_param, sub_place.layout()));
275 sub_place.write_cvalue(fx, CValue::ByRef(ebb_param, sub_place.layout()));
280 fx.local_map.insert(local, place);
// Vars and temps: explicit stack slot when flagged NOT_SSA, otherwise a
// Cranelift Variable.
283 for local in fx.mir.vars_and_temps_iter() {
284 let ty = fx.mir.local_decls[local].ty;
285 let layout = fx.layout_of(ty);
287 let place = if ssa_analyzed
290 .contains(crate::analyze::Flags::NOT_SSA)
292 let stack_slot = fx.bcx.create_stack_slot(StackSlotData {
293 kind: StackSlotKind::ExplicitSlot,
294 size: layout.size.bytes() as u32,
297 CPlace::from_stack_slot(fx, stack_slot, ty)
299 let var = Variable(local);
300 fx.bcx.declare_var(var, fx.cton_type(ty).unwrap());
301 CPlace::Var(var, layout)
304 fx.local_map.insert(local, place);
// Lowers a MIR Call terminator: untuples "rust-call" arguments, dispatches
// intrinsics inline, passes the return out-pointer as the first call
// argument, and emits a direct or indirect Cranelift call.
308 pub fn codegen_call<'a, 'tcx: 'a>(
309 fx: &mut FunctionCx<'a, 'tcx>,
310 func: &Operand<'tcx>,
311 args: &[Operand<'tcx>],
312 destination: &Option<(Place<'tcx>, BasicBlock)>,
314 let func = trans_operand(fx, func);
315 let fn_ty = func.layout().ty;
316 let sig = ty_fn_sig(fx.tcx, fn_ty);
318 // Unpack arguments tuple for closures
319 let args = if sig.abi == Abi::RustCall {
320 assert_eq!(args.len(), 2, "rust-call abi requires two arguments");
321 let self_arg = trans_operand(fx, &args[0]);
322 let pack_arg = trans_operand(fx, &args[1]);
323 let mut args = Vec::new();
// Flatten the packed tuple into one CValue per element.
325 match pack_arg.layout().ty.sty {
326 ty::TyTuple(ref tupled_arguments) => {
327 for (i, _) in tupled_arguments.iter().enumerate() {
328 args.push(pack_arg.value_field(fx, mir::Field::new(i)));
331 _ => bug!("argument to function with \"rust-call\" ABI is not a tuple"),
335 pack_arg.layout().ty,
336 args.iter().map(|a| a.layout().ty).collect::<Vec<_>>()
341 .map(|arg| trans_operand(fx, arg))
345 let destination = destination.as_ref().map(|(place, bb)| {
346 (trans_place(fx, place), *bb)
// Intrinsics are lowered inline; if one was handled we are done here.
349 if codegen_intrinsic_call(fx, fn_ty, sig, &args, destination) {
// Diverging calls (no destination) pass a null out-pointer.
353 let return_ptr = match destination {
354 Some((place, _)) => place.expect_addr(),
355 None => fx.bcx.ins().iconst(types::I64, 0),
// Out-pointer first, then each argument: scalars by value, the rest by
// reference — mirroring cton_sig_from_fn_ty.
358 let call_args = Some(return_ptr)
360 .chain(args.into_iter().map(|arg| {
361 if fx.cton_type(arg.layout().ty).is_some() {
366 })).collect::<Vec<_>>();
// Direct call for a known function value; otherwise load the function
// pointer and emit an indirect call with an imported signature.
369 CValue::Func(func, _) => {
370 fx.bcx.ins().call(func, &call_args);
373 let func_ty = func.layout().ty;
374 let func = func.load_value(fx);
377 .import_signature(cton_sig_from_fn_ty(fx.tcx, func_ty));
378 fx.bcx.ins().call_indirect(sig, func, &call_args);
// Jump to the return block, or trap when the callee never returns.
381 if let Some((_, dest)) = destination {
382 let ret_ebb = fx.get_ebb(dest);
383 fx.bcx.ins().jump(ret_ebb, &[]);
385 fx.bcx.ins().trap(TrapCode::User(!0));
// Emits the function epilogue: a bare return with no result values — the
// return value has already been written through the out-pointer that was
// passed as the first parameter (see codegen_fn_prelude).
389 pub fn codegen_return(fx: &mut FunctionCx) {
390 fx.bcx.ins().return_(&[]);
// Lowers a call to a "rust-intrinsic" function inline, writing the result
// into the destination place. Returns control via the destination block
// jump (or a trap for diverging intrinsics) when handled.
//
// FIX: the "cttz"/"cttz_nonzero" arm previously emitted `clz` (count
// *leading* zeros — identical to the "ctlz" arm above it) instead of
// `ctz` (count *trailing* zeros); it now emits `ctz`.
//
// NOTE(review): this chunk is a sampled excerpt — several match-arm
// headers and closing braces are missing from view.
393 fn codegen_intrinsic_call<'a, 'tcx: 'a>(
394 fx: &mut FunctionCx<'a, 'tcx>,
397 args: &[CValue<'tcx>],
398 destination: Option<(CPlace<'tcx>, BasicBlock)>,
400 if let TypeVariants::TyFnDef(def_id, substs) = fn_ty.sty {
401 if sig.abi == Abi::RustIntrinsic {
402 let intrinsic = fx.tcx.item_name(def_id).as_str();
403 let intrinsic = &intrinsic[..];
// Returning intrinsics need a destination place to write into.
405 let ret = match destination {
406 Some((place, _)) => place,
409 "codegen_call(fx, _, {:?}, {:?})",
412 // Insert non returning intrinsics here
415 fx.bcx.ins().trap(TrapCode::User(!0 - 1));
418 fx.bcx.ins().trap(TrapCode::User(!0 - 1));
420 _ => unimplemented!("unsupported instrinsic {}", intrinsic),
// Frequently-used layouts, computed once up front.
426 let nil_ty = fx.tcx.mk_nil();
427 let u64_layout = fx.layout_of(fx.tcx.types.u64);
428 let usize_layout = fx.layout_of(fx.tcx.types.usize);
432 assert_eq!(args.len(), 1);
// Pointer arithmetic: result is base + offset as a machine word.
435 assert_eq!(args.len(), 2);
436 let base = args[0].load_value(fx);
437 let offset = args[1].load_value(fx);
438 let res = fx.bcx.ins().iadd(base, offset);
439 let res = CValue::ByVal(res, ret.layout());
440 ret.write_cvalue(fx, res);
// Branch-prediction hints: pass the bool through unchanged.
442 "likely" | "unlikely" => {
443 assert_eq!(args.len(), 1);
444 ret.write_cvalue(fx, args[0]);
// Memory copy: byte count = element count * element size; forwarded to
// a library memcpy-style call (call site partially outside this view).
446 "copy" | "copy_nonoverlapping" => {
447 let elem_ty = substs.type_at(0);
448 let elem_size: u64 = fx.layout_of(elem_ty).size.bytes();
449 let elem_size = fx.bcx.ins().iconst(types::I64, elem_size as i64);
450 assert_eq!(args.len(), 3);
453 let count = args[2].load_value(fx);
454 let byte_amount = fx.bcx.ins().imul(count, elem_size);
457 &[dst, src, CValue::ByVal(byte_amount, usize_layout)],
461 "discriminant_value" => {
462 assert_eq!(args.len(), 1);
463 let discr = crate::base::trans_get_discriminant(fx, args[0], ret.layout());
464 ret.write_cvalue(fx, discr);
// size_of::<T>() as a usize constant from the layout.
467 assert_eq!(args.len(), 0);
468 let size_of = fx.layout_of(substs.type_at(0)).size.bytes();
469 let size_of = CValue::const_val(fx, usize_layout.ty, size_of as i64);
470 ret.write_cvalue(fx, size_of);
// type_id::<T>() as a u64 hash of the type.
473 assert_eq!(args.len(), 0);
474 let type_id = fx.tcx.type_id_hash(substs.type_at(0));
475 let type_id = CValue::const_val(fx, u64_layout.ty, type_id as i64);
476 ret.write_cvalue(fx, type_id);
// min_align_of::<T>() as a usize constant from the layout.
479 assert_eq!(args.len(), 0);
480 let min_align = fx.layout_of(substs.type_at(0)).align.abi();
481 let min_align = CValue::const_val(fx, usize_layout.ty, min_align as i64);
482 ret.write_cvalue(fx, min_align);
// unchecked_* arithmetic: plain (non-checked) int binops.
484 _ if intrinsic.starts_with("unchecked_") => {
485 assert_eq!(args.len(), 2);
486 let bin_op = match intrinsic {
487 "unchecked_div" => BinOp::Div,
488 "unchecked_rem" => BinOp::Rem,
489 "unchecked_shl" => BinOp::Shl,
490 "unchecked_shr" => BinOp::Shr,
491 _ => unimplemented!("intrinsic {}", intrinsic),
// Signedness is chosen from the result type.
493 let res = match ret.layout().ty.sty {
494 TypeVariants::TyUint(_) => crate::base::trans_int_binop(
502 TypeVariants::TyInt(_) => crate::base::trans_int_binop(
512 ret.write_cvalue(fx, res);
// *_with_overflow: checked binops returning (value, overflowed) pairs.
514 _ if intrinsic.ends_with("_with_overflow") => {
515 assert_eq!(args.len(), 2);
516 assert_eq!(args[0].layout().ty, args[1].layout().ty);
517 let bin_op = match intrinsic {
518 "add_with_overflow" => BinOp::Add,
519 "sub_with_overflow" => BinOp::Sub,
520 "mul_with_overflow" => BinOp::Mul,
521 _ => unimplemented!("intrinsic {}", intrinsic),
// Signedness is chosen from the operand type.
523 let res = match args[0].layout().ty.sty {
524 TypeVariants::TyUint(_) => crate::base::trans_checked_int_binop(
532 TypeVariants::TyInt(_) => crate::base::trans_checked_int_binop(
542 ret.write_cvalue(fx, res);
// overflowing_*: wrapping binops (overflow ignored).
544 _ if intrinsic.starts_with("overflowing_") => {
545 assert_eq!(args.len(), 2);
546 assert_eq!(args[0].layout().ty, args[1].layout().ty);
547 let bin_op = match intrinsic {
548 "overflowing_add" => BinOp::Add,
549 "overflowing_sub" => BinOp::Sub,
550 "overflowing_mul" => BinOp::Mul,
551 _ => unimplemented!("intrinsic {}", intrinsic),
553 let res = match args[0].layout().ty.sty {
554 TypeVariants::TyUint(_) => crate::base::trans_int_binop(
562 TypeVariants::TyInt(_) => crate::base::trans_int_binop(
572 ret.write_cvalue(fx, res);
// offset-style intrinsic: base + offset, keeping the pointer's layout.
575 assert_eq!(args.len(), 2);
576 let base = args[0].load_value(fx);
577 let offset = args[1].load_value(fx);
578 let res = fx.bcx.ins().iadd(base, offset);
579 ret.write_cvalue(fx, CValue::ByVal(res, args[0].layout()));
// transmute: reinterpret the source bytes with the destination layout.
582 assert_eq!(args.len(), 1);
583 let src_ty = substs.type_at(0);
584 let dst_ty = substs.type_at(1);
585 assert_eq!(args[0].layout().ty, src_ty);
586 let addr = args[0].force_stack(fx);
587 let dst_layout = fx.layout_of(dst_ty);
588 ret.write_cvalue(fx, CValue::ByRef(addr, dst_layout))
// uninit-style intrinsic: hand back the (uninitialized) contents of a
// fresh stack slot sized for the type.
591 assert_eq!(args.len(), 0);
592 let ty = substs.type_at(0);
593 let layout = fx.layout_of(ty);
594 let stack_slot = fx.bcx.create_stack_slot(StackSlotData {
595 kind: StackSlotKind::ExplicitSlot,
596 size: layout.size.bytes() as u32,
600 let uninit_place = CPlace::from_stack_slot(fx, stack_slot, ty);
601 let uninit_val = uninit_place.to_cvalue(fx);
602 ret.write_cvalue(fx, uninit_val);
// Count leading zeros.
604 "ctlz" | "ctlz_nonzero" => {
605 assert_eq!(args.len(), 1);
606 let arg = args[0].load_value(fx);
607 let res = CValue::ByVal(fx.bcx.ins().clz(arg), args[0].layout());
608 ret.write_cvalue(fx, res);
// Count trailing zeros. BUGFIX: was `clz` (leading zeros), which made
// cttz return the same value as ctlz; Cranelift's `ctz` is correct here.
610 "cttz" | "cttz_nonzero" => {
611 assert_eq!(args.len(), 1);
612 let arg = args[0].load_value(fx);
613 let res = CValue::ByVal(fx.bcx.ins().ctz(arg), args[0].layout());
614 ret.write_cvalue(fx, res);
// Population count (count of set bits).
617 assert_eq!(args.len(), 1);
618 let arg = args[0].load_value(fx);
619 let res = CValue::ByVal(fx.bcx.ins().popcnt(arg), args[0].layout());
620 ret.write_cvalue(fx, res);
622 _ => unimpl!("unsupported intrinsic {}", intrinsic),
// Handled: jump to the return block, or trap for diverging intrinsics.
625 if let Some((_, dest)) = destination {
626 let ret_ebb = fx.get_ebb(dest);
627 fx.bcx.ins().jump(ret_ebb, &[]);
629 fx.bcx.ins().trap(TrapCode::User(!0));