//! Emulate AArch64 LLVM intrinsics

use crate::intrinsics::*;
use crate::prelude::*;

use rustc_middle::ty::subst::SubstsRef;

pub(crate) fn codegen_aarch64_llvm_intrinsic_call<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    intrinsic: &str,
    _substs: SubstsRef<'tcx>,
    args: &[mir::Operand<'tcx>],
    ret: CPlace<'tcx>,
    target: Option<BasicBlock>,
) {
    // llvm.aarch64.neon.sqshl.v*i*

    match intrinsic {
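        // llvm.aarch64.isb: instruction synchronization barrier;
        // approximated here with a Cranelift fence.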
19 "llvm.aarch64.isb" => {
        _ if intrinsic.starts_with("llvm.aarch64.neon.abs.v") => {
            intrinsic_args!(fx, args => (a); intrinsic);

            simd_for_each_lane(fx, a, ret, &|fx, _lane_ty, _res_lane_ty, lane| {
                fx.bcx.ins().iabs(lane)
            });
        }
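        // llvm.aarch64.neon.cls.v*: count leading sign bits in each lane.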
        _ if intrinsic.starts_with("llvm.aarch64.neon.cls.v") => {
            intrinsic_args!(fx, args => (a); intrinsic);

            simd_for_each_lane(fx, a, ret, &|fx, _lane_ty, _res_lane_ty, lane| {
                fx.bcx.ins().cls(lane)
            });
        }
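        // llvm.aarch64.neon.rbit.v*: reverse the bit order within each lane.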
        _ if intrinsic.starts_with("llvm.aarch64.neon.rbit.v") => {
            intrinsic_args!(fx, args => (a); intrinsic);

            simd_for_each_lane(fx, a, ret, &|fx, _lane_ty, _res_lane_ty, lane| {
                fx.bcx.ins().bitrev(lane)
            });
        }
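        // llvm.aarch64.neon.sqadd.v*: lane-wise signed saturating addition.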
        _ if intrinsic.starts_with("llvm.aarch64.neon.sqadd.v") => {
            intrinsic_args!(fx, args => (x, y); intrinsic);

            simd_pair_for_each_lane_typed(fx, x, y, ret, &|fx, x_lane, y_lane| {
                crate::num::codegen_saturating_int_binop(fx, BinOp::Add, x_lane, y_lane)
            });
        }
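        // llvm.aarch64.neon.sqsub.v*: lane-wise signed saturating subtraction.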
        _ if intrinsic.starts_with("llvm.aarch64.neon.sqsub.v") => {
            intrinsic_args!(fx, args => (x, y); intrinsic);

            simd_pair_for_each_lane_typed(fx, x, y, ret, &|fx, x_lane, y_lane| {
                crate::num::codegen_saturating_int_binop(fx, BinOp::Sub, x_lane, y_lane)
            });
        }
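        // llvm.aarch64.neon.smax.v*: lane-wise signed maximum, lowered as compare + select.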
        _ if intrinsic.starts_with("llvm.aarch64.neon.smax.v") => {
            intrinsic_args!(fx, args => (x, y); intrinsic);

            simd_pair_for_each_lane(
                fx,
                x,
                y,
                ret,
                &|fx, _lane_ty, _res_lane_ty, x_lane, y_lane| {
                    let gt = fx.bcx.ins().icmp(IntCC::SignedGreaterThan, x_lane, y_lane);
                    fx.bcx.ins().select(gt, x_lane, y_lane)
                },
            );
        }
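        // llvm.aarch64.neon.umax.v*: lane-wise unsigned maximum, lowered as compare + select.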
        _ if intrinsic.starts_with("llvm.aarch64.neon.umax.v") => {
            intrinsic_args!(fx, args => (x, y); intrinsic);

            simd_pair_for_each_lane(
                fx,
                x,
                y,
                ret,
                &|fx, _lane_ty, _res_lane_ty, x_lane, y_lane| {
                    let gt = fx.bcx.ins().icmp(IntCC::UnsignedGreaterThan, x_lane, y_lane);
                    fx.bcx.ins().select(gt, x_lane, y_lane)
                },
            );
        }
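        // llvm.aarch64.neon.smaxv.i*: horizontal (across-vector) signed maximum reduction.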
        _ if intrinsic.starts_with("llvm.aarch64.neon.smaxv.i") => {
            intrinsic_args!(fx, args => (v); intrinsic);

            simd_reduce(fx, v, None, ret, &|fx, _ty, a, b| {
                let gt = fx.bcx.ins().icmp(IntCC::SignedGreaterThan, a, b);
                fx.bcx.ins().select(gt, a, b)
            });
        }
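        // llvm.aarch64.neon.umaxv.i*: horizontal unsigned maximum reduction.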
        _ if intrinsic.starts_with("llvm.aarch64.neon.umaxv.i") => {
            intrinsic_args!(fx, args => (v); intrinsic);

            simd_reduce(fx, v, None, ret, &|fx, _ty, a, b| {
                let gt = fx.bcx.ins().icmp(IntCC::UnsignedGreaterThan, a, b);
                fx.bcx.ins().select(gt, a, b)
            });
        }
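        // llvm.aarch64.neon.smin.v*: lane-wise signed minimum, lowered as compare + select.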
        _ if intrinsic.starts_with("llvm.aarch64.neon.smin.v") => {
            intrinsic_args!(fx, args => (x, y); intrinsic);

            simd_pair_for_each_lane(
                fx,
                x,
                y,
                ret,
                &|fx, _lane_ty, _res_lane_ty, x_lane, y_lane| {
                    let lt = fx.bcx.ins().icmp(IntCC::SignedLessThan, x_lane, y_lane);
                    fx.bcx.ins().select(lt, x_lane, y_lane)
                },
            );
        }
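        // llvm.aarch64.neon.umin.v*: lane-wise unsigned minimum, lowered as compare + select.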
        _ if intrinsic.starts_with("llvm.aarch64.neon.umin.v") => {
            intrinsic_args!(fx, args => (x, y); intrinsic);

            simd_pair_for_each_lane(
                fx,
                x,
                y,
                ret,
                &|fx, _lane_ty, _res_lane_ty, x_lane, y_lane| {
                    let lt = fx.bcx.ins().icmp(IntCC::UnsignedLessThan, x_lane, y_lane);
                    fx.bcx.ins().select(lt, x_lane, y_lane)
                },
            );
        }
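        // llvm.aarch64.neon.sminv.i*: horizontal signed minimum reduction.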
        _ if intrinsic.starts_with("llvm.aarch64.neon.sminv.i") => {
            intrinsic_args!(fx, args => (v); intrinsic);

            simd_reduce(fx, v, None, ret, &|fx, _ty, a, b| {
                let lt = fx.bcx.ins().icmp(IntCC::SignedLessThan, a, b);
                fx.bcx.ins().select(lt, a, b)
            });
        }
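        // llvm.aarch64.neon.uminv.i*: horizontal unsigned minimum reduction.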
        _ if intrinsic.starts_with("llvm.aarch64.neon.uminv.i") => {
            intrinsic_args!(fx, args => (v); intrinsic);

            simd_reduce(fx, v, None, ret, &|fx, _ty, a, b| {
                let lt = fx.bcx.ins().icmp(IntCC::UnsignedLessThan, a, b);
                fx.bcx.ins().select(lt, a, b)
            });
        }
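        // llvm.aarch64.neon.sshl.v* / sqshl.v* / sqshlu.v*: lane-wise shift left.
        // The saturating variants are currently lowered as a plain shift; saturation
        // is not implemented yet.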
        _ if intrinsic.starts_with("llvm.aarch64.neon.sshl.v")
            || intrinsic.starts_with("llvm.aarch64.neon.sqshl.v")
            // FIXME split this one out once saturating is implemented
            || intrinsic.starts_with("llvm.aarch64.neon.sqshlu.v") =>
        {
            intrinsic_args!(fx, args => (a, b); intrinsic);

            simd_pair_for_each_lane(fx, a, b, ret, &|fx, _lane_ty, _res_lane_ty, a, b| {
                fx.bcx.ins().ishl(a, b)
            });
        }
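        // llvm.aarch64.neon.sqshrn.v*: signed saturating shift right narrow.
        // Emulated below as a plain per-lane arithmetic shift right by the constant
        // imm32; the saturating behaviour of the real instruction is not modelled.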
        _ if intrinsic.starts_with("llvm.aarch64.neon.sqshrn.v") => {
            let (a, imm32) = match args {
                [a, imm32] => (a, imm32),
                _ => bug!("wrong number of args for intrinsic {intrinsic}"),
            };
            let a = codegen_operand(fx, a);
            let imm32 = crate::constant::mir_operand_get_const_val(fx, imm32)
                .expect("llvm.aarch64.neon.sqshrn.v* imm32 not const");

            simd_for_each_lane(fx, a, ret, &|fx, _lane_ty, _res_lane_ty, lane| match imm32
                .try_to_bits(Size::from_bytes(4))
                .unwrap_or_else(|| panic!("imm32 not scalar: {:?}", imm32))
            {
                imm32 if imm32 < 32 => fx.bcx.ins().sshr_imm(lane, i64::from(imm32 as u8)),
                _ => fx.bcx.ins().iconst(types::I32, 0),
            });
        }
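        // llvm.aarch64.neon.sqshrun.v*: signed saturating shift right unsigned narrow.
        // Emulated below as a plain per-lane logical shift right by the constant imm32;
        // as above, saturation is not modelled.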
        _ if intrinsic.starts_with("llvm.aarch64.neon.sqshrun.v") => {
            let (a, imm32) = match args {
                [a, imm32] => (a, imm32),
                _ => bug!("wrong number of args for intrinsic {intrinsic}"),
            };
            let a = codegen_operand(fx, a);
            let imm32 = crate::constant::mir_operand_get_const_val(fx, imm32)
                .expect("llvm.aarch64.neon.sqshrun.v* imm32 not const");

            simd_for_each_lane(fx, a, ret, &|fx, _lane_ty, _res_lane_ty, lane| match imm32
                .try_to_bits(Size::from_bytes(4))
                .unwrap_or_else(|| panic!("imm32 not scalar: {:?}", imm32))
            {
                imm32 if imm32 < 32 => fx.bcx.ins().ushr_imm(lane, i64::from(imm32 as u8)),
                _ => fx.bcx.ins().iconst(types::I32, 0),
            });
        }
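        // Anything not handled above: emit a warning and lower to a trap so that
        // compilation can continue; execution only aborts if the intrinsic is reached.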
        _ => {
            fx.tcx.sess.warn(&format!(
                "unsupported AArch64 llvm intrinsic {}; replacing with trap",
                intrinsic
            ));
            crate::trap::trap_unimplemented(fx, intrinsic);
            return;
        }
    }
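    // All supported intrinsics return normally: jump to the destination block.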
    let dest = target.expect("all llvm intrinsics used by stdlib should return");
    let ret_block = fx.get_block(dest);
    fx.bcx.ins().jump(ret_block, &[]);
}