4 use crate::rustc_target::spec::abi::Abi;
// Maps a `PassMode` to the Cranelift `Type` used for the corresponding
// function parameter. NOTE(review): interior lines of this fn are elided
// in this view (the match header/footer are not visible).
16 fn get_param_ty(self, fx: &FunctionCx<impl Backend>) -> Type {
// NoPass values occupy no parameter slot, so asking for their type is a bug.
18 PassMode::NoPass => unimplemented!("pass mode nopass"),
19 PassMode::ByVal(cton_type) => cton_type,
// ByRef arguments are passed as a pointer, so the param type is the target's pointer type.
20 PassMode::ByRef => fx.module.pointer_type(),
// Decides how a value of type `ty` is passed to / returned from a function
// for the given ABI: not at all (`NoPass`), as a scalar (`ByVal`), or via a
// pointer (`ByRef`). NOTE(review): interior lines are elided in this view.
25 fn get_pass_mode<'a, 'tcx: 'a>(
26 tcx: TyCtxt<'a, 'tcx, 'tcx>,
// presumably asserting the layout is sized here — elided lines hide the full condition
32 !tcx.layout_of(ParamEnv::reveal_all().and(ty))
// `!` (never) produces no value, so nothing is passed.
36 if let ty::Never = ty.sty {
// `()` likewise needs no storage.
42 } else if ty.sty == tcx.mk_unit().sty {
// Types that map onto a single Cranelift scalar are passed by value.
48 } else if let Some(ret_ty) = crate::common::cton_type_from_ty(tcx, ty) {
49 PassMode::ByVal(ret_ty)
// Aggregates for extern "C" are not implemented yet; this is the error text.
53 "Non scalars are not yet supported for \"C\" abi ({:?}) is_return: {:?}",
// Converts a `CValue` argument into the raw Cranelift value expected by the
// callee according to its pass mode: load the scalar for ByVal, or spill to
// the stack and pass the address for ByRef.
62 fn adjust_arg_for_abi<'a, 'tcx: 'a>(
63 fx: &mut FunctionCx<'a, 'tcx, impl Backend>,
// Arguments are never returns, hence `is_return = false`.
67 match get_pass_mode(fx.tcx, sig.abi, arg.layout().ty, false) {
68 PassMode::NoPass => unimplemented!("pass mode nopass"),
69 PassMode::ByVal(_) => arg.load_value(fx),
70 PassMode::ByRef => arg.force_stack(fx),
// Builds a Cranelift `Signature` from a Rust function type: selects the call
// convention per ABI, flattens "rust-call" tuple arguments, and appends a
// hidden return-pointer parameter for ByRef returns.
// NOTE(review): interior lines are elided in this view.
74 pub fn cton_sig_from_fn_ty<'a, 'tcx: 'a>(
75 tcx: TyCtxt<'a, 'tcx, 'tcx>,
78 let sig = ty_fn_sig(tcx, fn_ty);
79 assert!(!sig.variadic, "Variadic function are not yet supported");
80 let (call_conv, inputs, output): (CallConv, Vec<Ty>, Ty) = match sig.abi {
// Rust-internal calls use the fast (unstable) convention; "C" uses System V.
81 Abi::Rust => (CallConv::Fast, sig.inputs().to_vec(), sig.output()),
82 Abi::C => (CallConv::SystemV, sig.inputs().to_vec(), sig.output()),
// "rust-call": (self, (args...)) — the trailing tuple is spread into
// individual parameters.
84 assert_eq!(sig.inputs().len(), 2);
85 let extra_args = match sig.inputs().last().unwrap().sty {
86 ty::Tuple(ref tupled_arguments) => tupled_arguments,
87 _ => bug!("argument to function with \"rust-call\" ABI is not a tuple"),
89 let mut inputs: Vec<Ty> = vec![sig.inputs()[0]];
90 inputs.extend(extra_args.into_iter());
91 (CallConv::Fast, inputs, sig.output())
93 Abi::System => bug!("system abi should be selected elsewhere"),
94 Abi::RustIntrinsic => (CallConv::SystemV, sig.inputs().to_vec(), sig.output()),
95 _ => unimplemented!("unsupported abi {:?}", sig.abi),
// Lower each input type to its Cranelift parameter type; ByRef becomes a pointer.
100 .filter_map(|ty| match get_pass_mode(tcx, sig.abi, ty, false) {
101 PassMode::ByVal(cton_ty) => Some(cton_ty),
102 PassMode::NoPass => unimplemented!("pass mode nopass"),
103 PassMode::ByRef => Some(pointer_ty(tcx)),
// Returns: NoPass → no return value; ByVal → one return slot; ByRef →
// the caller passes a pointer for the callee to write the result into.
106 let (params, returns) = match get_pass_mode(tcx, sig.abi, output, true) {
107 PassMode::NoPass => (inputs.map(AbiParam::new).collect(), vec![]),
108 PassMode::ByVal(ret_ty) => (
109 inputs.map(AbiParam::new).collect(),
110 vec![AbiParam::new(ret_ty)],
114 Some(pointer_ty(tcx)) // First param is place to put return val
// Extracts a monomorphic `FnSig` from a callable type: fn pointers directly,
// closures with their environment prepended as the first argument, and
// generators lowered to `fn(&mut Self, ResumeTy?) -> GeneratorState<Y, R>`.
// Late-bound regions are erased at the end.
131 fn ty_fn_sig<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, ty: Ty<'tcx>) -> ty::FnSig<'tcx> {
132 let sig = match ty.sty {
134 // Shims currently have type TyFnPtr. Not sure this should remain.
135 ty::FnPtr(_) => ty.fn_sig(tcx),
136 ty::Closure(def_id, substs) => {
137 let sig = substs.closure_sig(def_id, tcx);
// The closure environment (self) becomes an explicit leading parameter.
139 let env_ty = tcx.closure_env_ty(def_id, substs).unwrap();
140 sig.map_bound(|sig| tcx.mk_fn_sig(
141 iter::once(*env_ty.skip_binder()).chain(sig.inputs().iter().cloned()),
148 ty::Generator(def_id, substs, _) => {
149 let sig = substs.poly_sig(def_id, tcx);
// Generators take `&mut self` with a late-bound environment region.
151 let env_region = ty::ReLateBound(ty::INNERMOST, ty::BrEnv);
152 let env_ty = tcx.mk_mut_ref(tcx.mk_region(env_region), ty);
154 sig.map_bound(|sig| {
// Return type is the `GeneratorState<Yield, Return>` lang item ADT.
155 let state_did = tcx.lang_items().gen_state().unwrap();
156 let state_adt_ref = tcx.adt_def(state_did);
157 let state_substs = tcx.intern_substs(&[
159 sig.return_ty.into(),
161 let ret_ty = tcx.mk_adt(state_adt_ref, state_substs);
163 tcx.mk_fn_sig(iter::once(env_ty),
166 hir::Unsafety::Normal,
171 _ => bug!("unexpected type {:?} to ty_fn_sig", ty)
// Erase late-bound regions; codegen never needs region information.
173 tcx.normalize_erasing_late_bound_regions(ParamEnv::reveal_all(), &sig)
// Returns the mangled symbol name and the Cranelift signature for a
// fully-monomorphized instance. Asserts up front that no inference
// variables or type parameters remain in the substs.
176 pub fn get_function_name_and_sig<'a, 'tcx>(
177 tcx: TyCtxt<'a, 'tcx, 'tcx>,
178 inst: Instance<'tcx>,
179 ) -> (String, Signature) {
180 assert!(!inst.substs.needs_infer() && !inst.substs.has_param_types());
181 let fn_ty = inst.ty(tcx);
182 let sig = cton_sig_from_fn_ty(tcx, fn_ty);
183 (tcx.symbol_name(inst).as_str().to_string(), sig)
// Call-related helper methods on the per-function codegen context.
// NOTE(review): interior lines of these methods are elided in this view.
186 impl<'a, 'tcx: 'a, B: Backend + 'a> FunctionCx<'a, 'tcx, B> {
187 /// Instance must be monomorphized
188 pub fn get_function_id(&mut self, inst: Instance<'tcx>) -> FuncId {
189 let (name, sig) = get_function_name_and_sig(self.tcx, inst);
// Import linkage: the actual definition may live in another codegen unit.
191 .declare_function(&name, Linkage::Import, &sig)
195 /// Instance must be monomorphized
196 pub fn get_function_ref(&mut self, inst: Instance<'tcx>) -> FuncRef {
197 let func_id = self.get_function_id(inst);
199 .declare_func_in_func(func_id, &mut self.bcx.func)
// lib_call: invoke a named external (libcall-style) function with raw
// Cranelift types; returns the single result value, if any.
205 input_tys: Vec<types::Type>,
206 output_ty: Option<types::Type>,
209 let sig = Signature {
210 params: input_tys.iter().cloned().map(AbiParam::new).collect(),
212 .map(|output_ty| vec![AbiParam::new(output_ty)])
213 .unwrap_or(Vec::new()),
// External libcalls always use the System V calling convention here.
214 call_conv: CallConv::SystemV,
218 .declare_function(&name, Linkage::Import, &sig)
222 .declare_func_in_func(func_id, &mut self.bcx.func);
223 let call_inst = self.bcx.ins().call(func_ref, args);
224 if output_ty.is_none() {
227 let results = self.bcx.inst_results(call_inst);
228 assert_eq!(results.len(), 1);
// easy_call: higher-level wrapper over lib_call taking CValues and a Rust
// return type; only scalar (or empty-tuple) returns are supported.
235 args: &[CValue<'tcx>],
238 let (input_tys, args): (Vec<_>, Vec<_>) = args
242 self.cton_type(arg.layout().ty).unwrap(),
243 arg.load_value(self),
246 let return_layout = self.layout_of(return_ty);
247 let return_ty = if let ty::Tuple(tup) = return_ty.sty {
249 bug!("easy_call( (...) -> <non empty tuple> ) is not allowed");
253 Some(self.cton_type(return_ty).unwrap())
255 if let Some(val) = self.lib_call(name, input_tys, return_ty, &args) {
256 CValue::ByVal(val, return_layout)
// No return value: materialize a dummy zero for the unit result.
259 self.bcx.ins().iconst(self.module.pointer_type(), 0),
// Signature of the function currently being compiled.
265 fn self_sig(&self) -> FnSig<'tcx> {
266 ty_fn_sig(self.tcx, self.instance.ty(self.tcx))
269 fn return_type(&self) -> Ty<'tcx> {
270 self.self_sig().output()
// Emits the function prologue: creates EBB parameters for the return pointer
// (if ByRef) and every argument (spreading "rust-call" tuples), then binds
// each MIR local to either an SSA variable or a stack slot based on the
// escape analysis. NOTE(review): interior lines are elided in this view.
274 pub fn codegen_fn_prelude<'a, 'tcx: 'a>(
275 fx: &mut FunctionCx<'a, 'tcx, impl Backend>,
// Determine which locals can stay in SSA form vs. need a stack address.
278 let ssa_analyzed = crate::analyze::analyze(fx);
280 let ret_layout = fx.layout_of(fx.return_type());
281 let output_pass_mode = get_pass_mode(fx.tcx, fx.self_sig().abi, fx.return_type(), true);
282 let ret_param = match output_pass_mode {
283 PassMode::NoPass => None,
284 PassMode::ByVal(_) => None,
// ByRef return: first EBB param is the caller-provided destination pointer.
285 PassMode::ByRef => Some(fx.bcx.append_ebb_param(start_ebb, fx.module.pointer_type())),
297 let arg_ty = fx.monomorphize(&fx.mir.local_decls[local].ty);
299 // Adapted from https://github.com/rust-lang/rust/blob/145155dc96757002c7b2e9de8489416e2fdbbd57/src/librustc_codegen_llvm/mir/mod.rs#L442-L482
300 if Some(local) == fx.mir.spread_arg {
301 // This argument (e.g. the last argument in the "rust-call" ABI)
302 // is a tuple that was spread at the ABI level and now we have
303 // to reconstruct it into a tuple local variable, from multiple
304 // individual function arguments.
306 let tupled_arg_tys = match arg_ty.sty {
307 ty::Tuple(ref tys) => tys,
308 _ => bug!("spread argument isn't a tuple?! but {:?}", arg_ty),
// One EBB parameter per tuple element.
311 let mut ebb_params = Vec::new();
312 for arg_ty in tupled_arg_tys.iter() {
314 get_pass_mode(fx.tcx, fx.self_sig().abi, arg_ty, false).get_param_ty(fx);
315 ebb_params.push(fx.bcx.append_ebb_param(start_ebb, cton_type));
318 (local, ArgKind::Spread(ebb_params), arg_ty)
// Ordinary argument: a single EBB parameter.
321 get_pass_mode(fx.tcx, fx.self_sig().abi, arg_ty, false).get_param_ty(fx);
324 ArgKind::Normal(fx.bcx.append_ebb_param(start_ebb, cton_type)),
328 }).collect::<Vec<(Local, ArgKind, Ty)>>();
330 fx.bcx.switch_to_block(start_ebb);
// Placeholder nop so global comments have an instruction to attach to.
332 fx.top_nop = Some(fx.bcx.ins().nop());
333 fx.add_global_comment(format!("ssa {:?}", ssa_analyzed));
// Bind RETURN_PLACE according to how the return value is passed.
335 match output_pass_mode {
336 PassMode::NoPass => {
// Dummy null address: the return place is never actually written.
337 let null = fx.bcx.ins().iconst(fx.module.pointer_type(), 0);
338 //unimplemented!("pass mode nopass");
341 CPlace::Addr(null, None, fx.layout_of(fx.return_type())),
344 PassMode::ByVal(ret_ty) => {
345 fx.bcx.declare_var(mir_var(RETURN_PLACE), ret_ty);
347 .insert(RETURN_PLACE, CPlace::Var(RETURN_PLACE, ret_layout));
// ByRef: write results through the pointer received as the first param.
352 CPlace::Addr(ret_param.unwrap(), None, ret_layout),
// Bind each argument local: SSA variable when possible, stack slot otherwise.
357 for (local, arg_kind, ty) in func_params {
358 let layout = fx.layout_of(ty);
360 if let ArgKind::Normal(ebb_param) = arg_kind {
364 .contains(crate::analyze::Flags::NOT_SSA)
367 .declare_var(mir_var(local), fx.cton_type(ty).unwrap());
368 match get_pass_mode(fx.tcx, fx.self_sig().abi, ty, false) {
369 PassMode::NoPass => unimplemented!("pass mode nopass"),
370 PassMode::ByVal(_) => fx.bcx.def_var(mir_var(local), ebb_param),
// ByRef param held in an SSA local: load the pointee once up front.
372 let val = CValue::ByRef(ebb_param, fx.layout_of(ty)).load_value(fx);
373 fx.bcx.def_var(mir_var(local), val);
376 fx.local_map.insert(local, CPlace::Var(local, layout));
// Non-SSA argument: allocate an explicit stack slot and copy it in.
381 let stack_slot = fx.bcx.create_stack_slot(StackSlotData {
382 kind: StackSlotKind::ExplicitSlot,
383 size: layout.size.bytes() as u32,
387 let place = CPlace::from_stack_slot(fx, stack_slot, ty);
390 ArgKind::Normal(ebb_param) => match get_pass_mode(fx.tcx, fx.self_sig().abi, ty, false)
392 PassMode::NoPass => unimplemented!("pass mode nopass"),
393 PassMode::ByVal(_) => {
394 place.write_cvalue(fx, CValue::ByVal(ebb_param, place.layout()))
396 PassMode::ByRef => place.write_cvalue(fx, CValue::ByRef(ebb_param, place.layout())),
// Spread tuple: write each element field-by-field into the tuple local.
398 ArgKind::Spread(ebb_params) => {
399 for (i, ebb_param) in ebb_params.into_iter().enumerate() {
400 let sub_place = place.place_field(fx, mir::Field::new(i));
401 match get_pass_mode(fx.tcx, fx.self_sig().abi, sub_place.layout().ty, false) {
402 PassMode::NoPass => unimplemented!("pass mode nopass"),
403 PassMode::ByVal(_) => {
404 sub_place.write_cvalue(fx, CValue::ByVal(ebb_param, sub_place.layout()))
407 sub_place.write_cvalue(fx, CValue::ByRef(ebb_param, sub_place.layout()))
413 fx.local_map.insert(local, place);
// Non-argument locals (vars and temps): same SSA-vs-stack-slot decision.
416 for local in fx.mir.vars_and_temps_iter() {
417 let ty = fx.mir.local_decls[local].ty;
418 let layout = fx.layout_of(ty);
420 let place = if ssa_analyzed
423 .contains(crate::analyze::Flags::NOT_SSA)
425 let stack_slot = fx.bcx.create_stack_slot(StackSlotData {
426 kind: StackSlotKind::ExplicitSlot,
427 size: layout.size.bytes() as u32,
430 CPlace::from_stack_slot(fx, stack_slot, ty)
433 .declare_var(mir_var(local), fx.cton_type(ty).unwrap());
434 CPlace::Var(local, layout)
437 fx.local_map.insert(local, place);
// Jump from the prologue into the MIR entry block.
442 .jump(*fx.ebb_map.get(&START_BLOCK).unwrap(), &[]);
// Lowers a MIR `Call` terminator: evaluates the callee and arguments
// (unpacking the trailing tuple for "rust-call"), dispatches intrinsics,
// then either jumps to the destination block or traps for diverging calls.
// NOTE(review): interior lines are elided in this view.
445 pub fn codegen_terminator_call<'a, 'tcx: 'a>(
446 fx: &mut FunctionCx<'a, 'tcx, impl Backend>,
447 func: &Operand<'tcx>,
448 args: &[Operand<'tcx>],
449 destination: &Option<(Place<'tcx>, BasicBlock)>,
451 let fn_ty = fx.monomorphize(&func.ty(&fx.mir.local_decls, fx.tcx));
452 let sig = ty_fn_sig(fx.tcx, fn_ty);
454 // Unpack arguments tuple for closures
455 let args = if sig.abi == Abi::RustCall {
456 assert_eq!(args.len(), 2, "rust-call abi requires two arguments");
457 let self_arg = trans_operand(fx, &args[0]);
458 let pack_arg = trans_operand(fx, &args[1]);
459 let mut args = Vec::new();
// Each field of the packed tuple becomes its own call argument.
461 match pack_arg.layout().ty.sty {
462 ty::Tuple(ref tupled_arguments) => {
463 for (i, _) in tupled_arguments.iter().enumerate() {
464 args.push(pack_arg.value_field(fx, mir::Field::new(i)));
467 _ => bug!("argument to function with \"rust-call\" ABI is not a tuple"),
// Non rust-call: evaluate operands in order.
472 .map(|arg| trans_operand(fx, arg))
476 let destination = destination
478 .map(|&(ref place, bb)| (trans_place(fx, place), bb));
// Intrinsics are handled separately; `false` means "not an intrinsic".
480 if !codegen_intrinsic_call(fx, fn_ty, &args, destination) {
486 destination.map(|(place, _)| place),
489 if let Some((_, dest)) = destination {
490 let ret_ebb = fx.get_ebb(dest);
491 fx.bcx.ins().jump(ret_ebb, &[]);
// Diverging call (no destination): execution must not continue.
493 fx.bcx.ins().trap(TrapCode::User(!0));
// Emits the actual Cranelift call: resolves the callee (direct, virtual via
// vtable, or indirect function pointer), prepends the return pointer for
// ByRef returns, ABI-adjusts each argument, and writes back a ByVal result.
// NOTE(review): interior lines are elided in this view.
498 pub fn codegen_call_inner<'a, 'tcx: 'a>(
499 fx: &mut FunctionCx<'a, 'tcx, impl Backend>,
500 func: Option<&Operand<'tcx>>,
502 args: Vec<CValue<'tcx>>,
503 ret_place: Option<CPlace<'tcx>>,
505 let sig = ty_fn_sig(fx.tcx, fn_ty);
507 let ret_layout = fx.layout_of(sig.output());
509 let output_pass_mode = get_pass_mode(fx.tcx, sig.abi, sig.output(), true);
510 let return_ptr = match output_pass_mode {
511 PassMode::NoPass => None,
512 PassMode::ByRef => match ret_place {
513 Some(ret_place) => Some(ret_place.expect_addr()),
// Result discarded by caller: pass a null destination pointer.
// NOTE(review): the callee will still write through this — presumably only
// reachable for never-returning cases; verify against callers.
514 None => Some(fx.bcx.ins().iconst(fx.module.pointer_type(), 0)),
516 PassMode::ByVal(_) => None,
// Direct calls resolve the instance from the FnDef type.
519 let instance = match fn_ty.sty {
520 ty::FnDef(def_id, substs) => {
521 Some(Instance::resolve(fx.tcx, ParamEnv::reveal_all(), def_id, substs).unwrap())
526 let func_ref: Option<Value>; // Indirect call target
// Trait-object method: fetch the fn pointer out of the vtable; the first
// argument is the self object carrying the vtable.
529 if let Some(Instance {
530 def: InstanceDef::Virtual(_, idx),
534 let (ptr, method) = crate::vtable::get_ptr_and_method_ref(fx, args[0], idx);
535 func_ref = Some(method);
// Otherwise: indirect through a function-pointer operand when no instance.
538 func_ref = if instance.is_none() {
539 let func = trans_operand(fx, func.expect("indirect call without func Operand"));
540 Some(func.load_value(fx))
545 args.get(0).map(|arg| adjust_arg_for_abi(fx, sig, *arg))
// Final argument list: optional return pointer first, then adjusted args.
549 let call_args: Vec<Value> = return_ptr
555 .map(|arg| adjust_arg_for_abi(fx, sig, arg)),
556 ).collect::<Vec<_>>();
558 let sig = fx.bcx.import_signature(cton_sig_from_fn_ty(fx.tcx, fn_ty));
559 let call_inst = if let Some(func_ref) = func_ref {
560 fx.bcx.ins().call_indirect(sig, func_ref, &call_args)
562 let func_ref = fx.get_function_ref(instance.expect("non-indirect call on non-FnDef type"));
563 fx.bcx.ins().call(func_ref, &call_args)
// ByVal results come back as an instruction result; ByRef/NoPass need no
// copy-out here (the callee wrote through the return pointer, or nothing).
566 match output_pass_mode {
567 PassMode::NoPass => {}
568 PassMode::ByVal(_) => {
569 if let Some(ret_place) = ret_place {
570 let results = fx.bcx.inst_results(call_inst);
571 ret_place.write_cvalue(fx, CValue::ByVal(results[0], ret_layout));
574 PassMode::ByRef => {}
// Emits the function epilogue return: empty return for NoPass/ByRef (the
// ByRef result was already written through the return pointer), or loads
// RETURN_PLACE and returns the scalar for ByVal.
578 pub fn codegen_return(fx: &mut FunctionCx<impl Backend>) {
579 match get_pass_mode(fx.tcx, fx.self_sig().abi, fx.return_type(), true) {
580 PassMode::NoPass | PassMode::ByRef => {
581 fx.bcx.ins().return_(&[]);
583 PassMode::ByVal(_) => {
584 let place = fx.get_local_place(RETURN_PLACE);
585 let ret_val = place.to_cvalue(fx).load_value(fx);
586 fx.bcx.ins().return_(&[ret_val]);
// Handles calls with the RustIntrinsic ABI inline, returning whether the
// call was an intrinsic (so the generic call path is skipped). A large
// match on the intrinsic name follows.
// NOTE(review): interior lines are elided in this view; several match-arm
// headers (e.g. for "offset", "size_of", "transmute") are not visible.
591 fn codegen_intrinsic_call<'a, 'tcx: 'a>(
592 fx: &mut FunctionCx<'a, 'tcx, impl Backend>,
594 args: &[CValue<'tcx>],
595 destination: Option<(CPlace<'tcx>, BasicBlock)>,
597 if let ty::FnDef(def_id, substs) = fn_ty.sty {
598 let sig = ty_fn_sig(fx.tcx, fn_ty);
600 if sig.abi == Abi::RustIntrinsic {
601 let intrinsic = fx.tcx.item_name(def_id).as_str();
602 let intrinsic = &intrinsic[..];
604 let ret = match destination {
605 Some((place, _)) => place,
607 // Insert non returning intrinsics here
// Diverging intrinsics (e.g. abort/unreachable) lower to a trap.
610 fx.bcx.ins().trap(TrapCode::User(!0 - 1));
613 fx.bcx.ins().trap(TrapCode::User(!0 - 1));
615 _ => unimplemented!("unsupported instrinsic {}", intrinsic),
// Commonly used layouts for the arms below.
621 let nil_ty = fx.tcx.mk_unit();
622 let u64_layout = fx.layout_of(fx.tcx.types.u64);
623 let usize_layout = fx.layout_of(fx.tcx.types.usize);
627 assert_eq!(args.len(), 1);
// Pointer offset: plain integer add of base pointer and byte/element offset.
630 assert_eq!(args.len(), 2);
631 let base = args[0].load_value(fx);
632 let offset = args[1].load_value(fx);
633 let res = fx.bcx.ins().iadd(base, offset);
634 let res = CValue::ByVal(res, ret.layout());
635 ret.write_cvalue(fx, res);
// Branch-prediction hints are no-ops here: just forward the value.
637 "likely" | "unlikely" => {
638 assert_eq!(args.len(), 1);
639 ret.write_cvalue(fx, args[0]);
641 "copy" | "copy_nonoverlapping" => {
642 let elem_ty = substs.type_at(0);
643 let elem_size: u64 = fx.layout_of(elem_ty).size.bytes();
647 .iconst(fx.module.pointer_type(), elem_size as i64);
648 assert_eq!(args.len(), 3);
649 let src = args[0].load_value(fx);
650 let dst = args[1].load_value(fx);
651 let count = args[2].load_value(fx);
// Total bytes = element count * element size.
652 let byte_amount = fx.bcx.ins().imul(count, elem_size);
// memcpy requires non-overlapping buffers; memmove tolerates overlap.
654 if intrinsic.ends_with("_nonoverlapping") {
655 fx.bcx.call_memcpy(fx.isa, dst, src, byte_amount);
657 fx.bcx.call_memmove(fx.isa, dst, src, byte_amount);
660 "discriminant_value" => {
661 assert_eq!(args.len(), 1);
662 let discr = crate::base::trans_get_discriminant(fx, args[0], ret.layout());
663 ret.write_cvalue(fx, discr);
// size_of: compile-time constant from the layout.
666 assert_eq!(args.len(), 0);
667 let size_of = fx.layout_of(substs.type_at(0)).size.bytes();
668 let size_of = CValue::const_val(fx, usize_layout.ty, size_of as i64);
669 ret.write_cvalue(fx, size_of);
// size_of_val: static size for sized types, otherwise computed from the
// fat-pointer metadata (slice length or vtable).
672 assert_eq!(args.len(), 1);
673 let layout = fx.layout_of(substs.type_at(0));
674 let size = match &layout.ty.sty {
675 _ if !layout.is_unsized() => fx
678 .iconst(fx.module.pointer_type(), layout.size.bytes() as i64),
680 let len = args[0].load_value_pair(fx).1;
681 let elem_size = fx.layout_of(elem).size.bytes();
682 fx.bcx.ins().imul_imm(len, elem_size as i64)
684 ty::Dynamic(..) => crate::vtable::size_of_obj(fx, args[0]),
685 ty => bug!("size_of_val for unknown unsized type {:?}", ty),
687 ret.write_cvalue(fx, CValue::ByVal(size, usize_layout));
690 assert_eq!(args.len(), 0);
691 let min_align = fx.layout_of(substs.type_at(0)).align.abi();
692 let min_align = CValue::const_val(fx, usize_layout.ty, min_align as i64);
693 ret.write_cvalue(fx, min_align);
695 "min_align_of_val" => {
696 assert_eq!(args.len(), 1);
697 let layout = fx.layout_of(substs.type_at(0));
698 let align = match &layout.ty.sty {
699 _ if !layout.is_unsized() => fx
702 .iconst(fx.module.pointer_type(), layout.align.abi() as i64),
704 let align = fx.layout_of(elem).align.abi() as i64;
705 fx.bcx.ins().iconst(fx.module.pointer_type(), align)
707 ty::Dynamic(..) => crate::vtable::min_align_of_obj(fx, args[0]),
708 ty => unimplemented!("min_align_of_val for {:?}", ty),
710 ret.write_cvalue(fx, CValue::ByVal(align, usize_layout));
// type_id: 64-bit hash of the monomorphized type.
713 assert_eq!(args.len(), 0);
714 let type_id = fx.tcx.type_id_hash(substs.type_at(0));
715 let type_id = CValue::const_val(fx, u64_layout.ty, type_id as i64);
716 ret.write_cvalue(fx, type_id);
// unchecked_* arithmetic: no overflow/zero checks emitted.
718 _ if intrinsic.starts_with("unchecked_") => {
719 assert_eq!(args.len(), 2);
720 let bin_op = match intrinsic {
721 "unchecked_div" => BinOp::Div,
722 "unchecked_rem" => BinOp::Rem,
723 "unchecked_shl" => BinOp::Shl,
724 "unchecked_shr" => BinOp::Shr,
725 _ => unimplemented!("intrinsic {}", intrinsic),
// Signedness of the operation is chosen by the result type.
727 let res = match ret.layout().ty.sty {
728 ty::Uint(_) => crate::base::trans_int_binop(
736 ty::Int(_) => crate::base::trans_int_binop(
746 ret.write_cvalue(fx, res);
// *_with_overflow: returns (value, overflowed) pair.
748 _ if intrinsic.ends_with("_with_overflow") => {
749 assert_eq!(args.len(), 2);
750 assert_eq!(args[0].layout().ty, args[1].layout().ty);
751 let bin_op = match intrinsic {
752 "add_with_overflow" => BinOp::Add,
753 "sub_with_overflow" => BinOp::Sub,
754 "mul_with_overflow" => BinOp::Mul,
755 _ => unimplemented!("intrinsic {}", intrinsic),
757 let res = match args[0].layout().ty.sty {
758 ty::Uint(_) => crate::base::trans_checked_int_binop(
766 ty::Int(_) => crate::base::trans_checked_int_binop(
776 ret.write_cvalue(fx, res);
// overflowing_*: wrapping result, overflow flag discarded.
778 _ if intrinsic.starts_with("overflowing_") => {
779 assert_eq!(args.len(), 2);
780 assert_eq!(args[0].layout().ty, args[1].layout().ty);
781 let bin_op = match intrinsic {
782 "overflowing_add" => BinOp::Add,
783 "overflowing_sub" => BinOp::Sub,
784 "overflowing_mul" => BinOp::Mul,
785 _ => unimplemented!("intrinsic {}", intrinsic),
787 let res = match args[0].layout().ty.sty {
788 ty::Uint(_) => crate::base::trans_int_binop(
796 ty::Int(_) => crate::base::trans_int_binop(
806 ret.write_cvalue(fx, res);
// presumably "arith_offset": wrapping pointer add — arm header elided.
809 assert_eq!(args.len(), 2);
810 let base = args[0].load_value(fx);
811 let offset = args[1].load_value(fx);
812 let res = fx.bcx.ins().iadd(base, offset);
813 ret.write_cvalue(fx, CValue::ByVal(res, args[0].layout()));
// presumably "transmute": reinterpret the bytes via a stack address.
816 assert_eq!(args.len(), 1);
817 let src_ty = substs.type_at(0);
818 let dst_ty = substs.type_at(1);
819 assert_eq!(args[0].layout().ty, src_ty);
820 let addr = args[0].force_stack(fx);
821 let dst_layout = fx.layout_of(dst_ty);
822 ret.write_cvalue(fx, CValue::ByRef(addr, dst_layout))
// presumably "uninit": hand back the contents of a fresh stack slot.
825 assert_eq!(args.len(), 0);
826 let ty = substs.type_at(0);
827 let layout = fx.layout_of(ty);
828 let stack_slot = fx.bcx.create_stack_slot(StackSlotData {
829 kind: StackSlotKind::ExplicitSlot,
830 size: layout.size.bytes() as u32,
834 let uninit_place = CPlace::from_stack_slot(fx, stack_slot, ty);
835 let uninit_val = uninit_place.to_cvalue(fx);
836 ret.write_cvalue(fx, uninit_val);
// Count leading zeros.
838 "ctlz" | "ctlz_nonzero" => {
839 assert_eq!(args.len(), 1);
840 let arg = args[0].load_value(fx);
841 let res = CValue::ByVal(fx.bcx.ins().clz(arg), args[0].layout());
842 ret.write_cvalue(fx, res);
// Count trailing zeros.
// FIX: this arm previously emitted `clz` (count *leading* zeros), copied
// from the "ctlz" arm above — `cttz` must lower to Cranelift's `ctz`.
844 "cttz" | "cttz_nonzero" => {
845 assert_eq!(args.len(), 1);
846 let arg = args[0].load_value(fx);
847 let res = CValue::ByVal(fx.bcx.ins().ctz(arg), args[0].layout());
848 ret.write_cvalue(fx, res);
// presumably the "ctpop" arm (population count) — arm header elided.
851 assert_eq!(args.len(), 1);
852 let arg = args[0].load_value(fx);
853 let res = CValue::ByVal(fx.bcx.ins().popcnt(arg), args[0].layout());
854 ret.write_cvalue(fx, res);
// presumably "needs_drop": compile-time bool from drop-glue analysis.
857 assert_eq!(args.len(), 0);
858 let ty = substs.type_at(0);
859 let needs_drop = if ty.needs_drop(fx.tcx, ParamEnv::reveal_all()) {
864 let needs_drop = CValue::const_val(fx, fx.tcx.types.bool, needs_drop);
865 ret.write_cvalue(fx, needs_drop);
// NOTE(review): the atomic_* intrinsics below are lowered as plain
// (non-atomic) loads/stores/RMW sequences — no ordering is enforced.
867 _ if intrinsic.starts_with("atomic_fence") => {}
868 _ if intrinsic.starts_with("atomic_load") => {
869 assert_eq!(args.len(), 1);
871 fx.layout_of(args[0].layout().ty.builtin_deref(true).unwrap().ty);
872 let val = CValue::ByRef(args[0].load_value(fx), inner_layout);
873 ret.write_cvalue(fx, val);
875 _ if intrinsic.starts_with("atomic_store") => {
876 assert_eq!(args.len(), 2);
877 let dest = CPlace::Addr(args[0].load_value(fx), None, args[1].layout());
878 dest.write_cvalue(fx, args[1]);
// atomic_xadd: fetch-add returning the old value (non-atomic lowering).
880 _ if intrinsic.starts_with("atomic_xadd") => {
881 assert_eq!(args.len(), 2);
882 let clif_ty = fx.cton_type(substs.type_at(0)).unwrap();
883 let ptr = args[0].load_value(fx);
884 let amount = args[1].load_value(fx);
885 let old = fx.bcx.ins().load(clif_ty, MemFlags::new(), ptr, 0);
886 let new = fx.bcx.ins().iadd(old, amount);
887 fx.bcx.ins().store(MemFlags::new(), ptr, new, 0);
888 ret.write_cvalue(fx, CValue::ByVal(old, fx.layout_of(substs.type_at(0))));
// atomic_xsub: fetch-sub returning the old value (non-atomic lowering).
890 _ if intrinsic.starts_with("atomic_xsub") => {
891 assert_eq!(args.len(), 2);
892 let clif_ty = fx.cton_type(substs.type_at(0)).unwrap();
893 let ptr = args[0].load_value(fx);
894 let amount = args[1].load_value(fx);
895 let old = fx.bcx.ins().load(clif_ty, MemFlags::new(), ptr, 0);
896 let new = fx.bcx.ins().isub(old, amount);
897 fx.bcx.ins().store(MemFlags::new(), ptr, new, 0);
898 ret.write_cvalue(fx, CValue::ByVal(old, fx.layout_of(substs.type_at(0))));
900 _ => unimpl!("unsupported intrinsic {}", intrinsic),
// Intrinsic handled: jump to the destination block, or trap if diverging.
903 if let Some((_, dest)) = destination {
904 let ret_ebb = fx.get_ebb(dest);
905 fx.bcx.ins().jump(ret_ebb, &[]);
907 fx.bcx.ins().trap(TrapCode::User(!0));