//! Emulate AArch64 LLVM intrinsics

use crate::intrinsics::*;
use crate::prelude::*;

use rustc_middle::ty::subst::SubstsRef;

pub(crate) fn codegen_aarch64_llvm_intrinsic_call<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    intrinsic: &str,
    _substs: SubstsRef<'tcx>,
    args: &[mir::Operand<'tcx>],
    ret: CPlace<'tcx>,
    target: Option<BasicBlock>,
) {
    // Intrinsics are matched by name prefix: the `.v*`/`.i*` suffixes only
    // encode the vector shape, which the lane helpers recover from the
    // operand types instead.
    match intrinsic {
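        // ISB (instruction synchronization barrier); the closest available
        // Cranelift lowering is a fence.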
19         "llvm.aarch64.isb" => {
20             fx.bcx.ins().fence();
21         }
22
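        // Lanewise integer absolute value (ABS).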
        _ if intrinsic.starts_with("llvm.aarch64.neon.abs.v") => {
            intrinsic_args!(fx, args => (a); intrinsic);

            simd_for_each_lane(fx, a, ret, &|fx, _lane_ty, _res_lane_ty, lane| {
                fx.bcx.ins().iabs(lane)
            });
        }

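        // Lanewise count of leading sign bits (CLS).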
        _ if intrinsic.starts_with("llvm.aarch64.neon.cls.v") => {
            intrinsic_args!(fx, args => (a); intrinsic);

            simd_for_each_lane(fx, a, ret, &|fx, _lane_ty, _res_lane_ty, lane| {
                fx.bcx.ins().cls(lane)
            });
        }

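        // Lanewise bit reversal (RBIT).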
        _ if intrinsic.starts_with("llvm.aarch64.neon.rbit.v") => {
            intrinsic_args!(fx, args => (a); intrinsic);

            simd_for_each_lane(fx, a, ret, &|fx, _lane_ty, _res_lane_ty, lane| {
                fx.bcx.ins().bitrev(lane)
            });
        }

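        // Lanewise signed saturating addition (SQADD), via the shared
        // saturating-arithmetic helper.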
        _ if intrinsic.starts_with("llvm.aarch64.neon.sqadd.v") => {
            intrinsic_args!(fx, args => (x, y); intrinsic);

            simd_pair_for_each_lane_typed(fx, x, y, ret, &|fx, x_lane, y_lane| {
                crate::num::codegen_saturating_int_binop(fx, BinOp::Add, x_lane, y_lane)
            });
        }

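        // Lanewise signed saturating subtraction (SQSUB).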
        _ if intrinsic.starts_with("llvm.aarch64.neon.sqsub.v") => {
            intrinsic_args!(fx, args => (x, y); intrinsic);

            simd_pair_for_each_lane_typed(fx, x, y, ret, &|fx, x_lane, y_lane| {
                crate::num::codegen_saturating_int_binop(fx, BinOp::Sub, x_lane, y_lane)
            });
        }

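        // Lanewise signed maximum (SMAX), expressed as a compare followed by
        // a select.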
        _ if intrinsic.starts_with("llvm.aarch64.neon.smax.v") => {
            intrinsic_args!(fx, args => (x, y); intrinsic);

            simd_pair_for_each_lane(
                fx,
                x,
                y,
                ret,
                &|fx, _lane_ty, _res_lane_ty, x_lane, y_lane| {
                    let gt = fx.bcx.ins().icmp(IntCC::SignedGreaterThan, x_lane, y_lane);
                    fx.bcx.ins().select(gt, x_lane, y_lane)
                },
            );
        }

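        // Lanewise unsigned maximum (UMAX).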
        _ if intrinsic.starts_with("llvm.aarch64.neon.umax.v") => {
            intrinsic_args!(fx, args => (x, y); intrinsic);

            simd_pair_for_each_lane(
                fx,
                x,
                y,
                ret,
                &|fx, _lane_ty, _res_lane_ty, x_lane, y_lane| {
                    let gt = fx.bcx.ins().icmp(IntCC::UnsignedGreaterThan, x_lane, y_lane);
                    fx.bcx.ins().select(gt, x_lane, y_lane)
                },
            );
        }

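        // Horizontal signed maximum (SMAXV): reduces all lanes to one scalar.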
        _ if intrinsic.starts_with("llvm.aarch64.neon.smaxv.i") => {
            intrinsic_args!(fx, args => (v); intrinsic);

            simd_reduce(fx, v, None, ret, &|fx, _ty, a, b| {
                let gt = fx.bcx.ins().icmp(IntCC::SignedGreaterThan, a, b);
                fx.bcx.ins().select(gt, a, b)
            });
        }

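        // Horizontal unsigned maximum (UMAXV).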
        _ if intrinsic.starts_with("llvm.aarch64.neon.umaxv.i") => {
            intrinsic_args!(fx, args => (v); intrinsic);

            simd_reduce(fx, v, None, ret, &|fx, _ty, a, b| {
                let gt = fx.bcx.ins().icmp(IntCC::UnsignedGreaterThan, a, b);
                fx.bcx.ins().select(gt, a, b)
            });
        }

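        // Lanewise signed minimum (SMIN).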
        _ if intrinsic.starts_with("llvm.aarch64.neon.smin.v") => {
            intrinsic_args!(fx, args => (x, y); intrinsic);

            simd_pair_for_each_lane(
                fx,
                x,
                y,
                ret,
                &|fx, _lane_ty, _res_lane_ty, x_lane, y_lane| {
                    let lt = fx.bcx.ins().icmp(IntCC::SignedLessThan, x_lane, y_lane);
                    fx.bcx.ins().select(lt, x_lane, y_lane)
                },
            );
        }

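        // Lanewise unsigned minimum (UMIN).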
        _ if intrinsic.starts_with("llvm.aarch64.neon.umin.v") => {
            intrinsic_args!(fx, args => (x, y); intrinsic);

            simd_pair_for_each_lane(
                fx,
                x,
                y,
                ret,
                &|fx, _lane_ty, _res_lane_ty, x_lane, y_lane| {
                    let lt = fx.bcx.ins().icmp(IntCC::UnsignedLessThan, x_lane, y_lane);
                    fx.bcx.ins().select(lt, x_lane, y_lane)
                },
            );
        }

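        // Horizontal signed minimum (SMINV).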
        _ if intrinsic.starts_with("llvm.aarch64.neon.sminv.i") => {
            intrinsic_args!(fx, args => (v); intrinsic);

            simd_reduce(fx, v, None, ret, &|fx, _ty, a, b| {
                let lt = fx.bcx.ins().icmp(IntCC::SignedLessThan, a, b);
                fx.bcx.ins().select(lt, a, b)
            });
        }

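        // Horizontal unsigned minimum (UMINV).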
        _ if intrinsic.starts_with("llvm.aarch64.neon.uminv.i") => {
            intrinsic_args!(fx, args => (v); intrinsic);

            simd_reduce(fx, v, None, ret, &|fx, _ty, a, b| {
                let lt = fx.bcx.ins().icmp(IntCC::UnsignedLessThan, a, b);
                fx.bcx.ins().select(lt, a, b)
            });
        }

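        // The shift intrinsics below are disabled: as the FIXMEs note, the
        // sketched lowerings ignore saturation, so enabling them as-is would
        // miscompile the saturating variants.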
        /*
        _ if intrinsic.starts_with("llvm.aarch64.neon.sshl.v")
            || intrinsic.starts_with("llvm.aarch64.neon.sqshl.v")
            // FIXME split this one out once saturating is implemented
            || intrinsic.starts_with("llvm.aarch64.neon.sqshlu.v") =>
        {
            intrinsic_args!(fx, args => (a, b); intrinsic);

            simd_pair_for_each_lane(fx, a, b, ret, &|fx, _lane_ty, _res_lane_ty, a, b| {
                // FIXME saturate?
                fx.bcx.ins().ishl(a, b)
            });
        }

        _ if intrinsic.starts_with("llvm.aarch64.neon.sqshrn.v") => {
            let (a, imm32) = match args {
                [a, imm32] => (a, imm32),
                _ => bug!("wrong number of args for intrinsic {intrinsic}"),
            };
            let a = codegen_operand(fx, a);
            let imm32 = crate::constant::mir_operand_get_const_val(fx, imm32)
                .expect("llvm.aarch64.neon.sqshrn.v* imm32 not const");

            simd_for_each_lane(fx, a, ret, &|fx, _lane_ty, _res_lane_ty, lane| match imm32
                .try_to_bits(Size::from_bytes(4))
                .unwrap_or_else(|| panic!("imm32 not scalar: {:?}", imm32))
            {
                imm32 if imm32 < 32 => fx.bcx.ins().sshr_imm(lane, i64::from(imm32 as u8)),
                _ => fx.bcx.ins().iconst(types::I32, 0),
            });
        }

        _ if intrinsic.starts_with("llvm.aarch64.neon.sqshrun.v") => {
            let (a, imm32) = match args {
                [a, imm32] => (a, imm32),
                _ => bug!("wrong number of args for intrinsic {intrinsic}"),
            };
            let a = codegen_operand(fx, a);
            let imm32 = crate::constant::mir_operand_get_const_val(fx, imm32)
                .expect("llvm.aarch64.neon.sqshrun.v* imm32 not const");

            simd_for_each_lane(fx, a, ret, &|fx, _lane_ty, _res_lane_ty, lane| match imm32
                .try_to_bits(Size::from_bytes(4))
                .unwrap_or_else(|| panic!("imm32 not scalar: {:?}", imm32))
            {
                imm32 if imm32 < 32 => fx.bcx.ins().ushr_imm(lane, i64::from(imm32 as u8)),
                _ => fx.bcx.ins().iconst(types::I32, 0),
            });
        }
        */
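        // Any unrecognized intrinsic compiles to a runtime trap (plus a
        // warning) rather than aborting compilation, so builds that never
        // reach the intrinsic still succeed.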
        _ => {
            fx.tcx.sess.warn(&format!(
                "unsupported AArch64 llvm intrinsic {}; replacing with trap",
                intrinsic
            ));
            crate::trap::trap_unimplemented(fx, intrinsic);
            return;
        }
    }

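    // All handled intrinsics fall through to a jump into the caller-provided
    // continuation block; the trap arm returns early above because the trap
    // already terminates the current block.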
    let dest = target.expect("all llvm intrinsics used by stdlib should return");
    let ret_block = fx.get_block(dest);
    fx.bcx.ins().jump(ret_block, &[]);
}