1 //! Codegen `extern "platform-intrinsic"` intrinsics.
3 use rustc_middle::ty::subst::SubstsRef;
4 use rustc_span::Symbol;
// Emits an "invalid monomorphization" compile error when a SIMD intrinsic is
// instantiated with a non-SIMD input type, then emits a trap instruction so
// the partially-built CLIF function still passes the Cranelift verifier
// (the block would otherwise be left without a valid terminator).
9 fn report_simd_type_validation_error(
10 fx: &mut FunctionCx<'_, '_, '_>,
// `intrinsic` names the offending intrinsic; `ty` is the non-SIMD type found.
15 fx.tcx.sess.span_err(span, &format!("invalid monomorphization of `{}` intrinsic: expected SIMD input type, found non-SIMD `{}`", intrinsic, ty));
16 // Prevent verifier error
17 fx.bcx.ins().trap(TrapCode::UnreachableCodeReached);
// Lowers `extern "platform-intrinsic"` SIMD intrinsics to Cranelift IR by
// dispatching on the intrinsic symbol and emitting scalar per-lane CLIF
// instructions (no native vector instructions yet — see FIXMEs below).
// NOTE(review): relies on helpers defined elsewhere in this crate
// (`intrinsic_args!`, `simd_for_each_lane`, `simd_pair_for_each_lane`,
// `simd_reduce`, `simd_reduce_bool`, `codegen_operand`, `clif_int_or_float_cast`).
20 pub(super) fn codegen_simd_intrinsic_call<'tcx>(
21 fx: &mut FunctionCx<'_, '_, 'tcx>,
23 _substs: SubstsRef<'tcx>,
24 args: &[mir::Operand<'tcx>],
// Lane-wise conversion arm (presumably `simd_cast` — TODO confirm; the arm
// header is elided in this view): converts every lane of `a` to the return
// vector's lane type.
30 intrinsic_args!(fx, args => (a); intrinsic);
32 if !a.layout().ty.is_simd() {
33 report_simd_type_validation_error(fx, intrinsic, span, a.layout().ty);
37 simd_for_each_lane(fx, a, ret, &|fx, lane_ty, ret_lane_ty, lane| {
38 let ret_lane_clif_ty = fx.clif_type(ret_lane_ty).unwrap();
// Source/destination signedness selects the cast kind (sign- vs
// zero-extension, signed vs unsigned int<->float conversion).
40 let from_signed = type_sign(lane_ty);
41 let to_signed = type_sign(ret_lane_ty);
43 clif_int_or_float_cast(fx, lane, from_signed, ret_lane_clif_ty, to_signed)
// Lane-wise comparisons. Unsigned integers use the Unsigned* IntCC codes,
// signed integers the Signed* codes, and floats the FloatCC codes.
47 sym::simd_eq | sym::simd_ne | sym::simd_lt | sym::simd_le | sym::simd_gt | sym::simd_ge => {
48 intrinsic_args!(fx, args => (x, y); intrinsic);
50 if !x.layout().ty.is_simd() {
51 report_simd_type_validation_error(fx, intrinsic, span, x.layout().ty);
55 // FIXME use vector instructions when possible
56 simd_pair_for_each_lane(fx, x, y, ret, &|fx, lane_ty, res_lane_ty, x_lane, y_lane| {
57 let res_lane = match (lane_ty.kind(), intrinsic) {
58 (ty::Uint(_), sym::simd_eq) => fx.bcx.ins().icmp(IntCC::Equal, x_lane, y_lane),
59 (ty::Uint(_), sym::simd_ne) => {
60 fx.bcx.ins().icmp(IntCC::NotEqual, x_lane, y_lane)
62 (ty::Uint(_), sym::simd_lt) => {
63 fx.bcx.ins().icmp(IntCC::UnsignedLessThan, x_lane, y_lane)
65 (ty::Uint(_), sym::simd_le) => {
66 fx.bcx.ins().icmp(IntCC::UnsignedLessThanOrEqual, x_lane, y_lane)
68 (ty::Uint(_), sym::simd_gt) => {
69 fx.bcx.ins().icmp(IntCC::UnsignedGreaterThan, x_lane, y_lane)
71 (ty::Uint(_), sym::simd_ge) => {
72 fx.bcx.ins().icmp(IntCC::UnsignedGreaterThanOrEqual, x_lane, y_lane)
75 (ty::Int(_), sym::simd_eq) => fx.bcx.ins().icmp(IntCC::Equal, x_lane, y_lane),
76 (ty::Int(_), sym::simd_ne) => {
77 fx.bcx.ins().icmp(IntCC::NotEqual, x_lane, y_lane)
79 (ty::Int(_), sym::simd_lt) => {
80 fx.bcx.ins().icmp(IntCC::SignedLessThan, x_lane, y_lane)
82 (ty::Int(_), sym::simd_le) => {
83 fx.bcx.ins().icmp(IntCC::SignedLessThanOrEqual, x_lane, y_lane)
85 (ty::Int(_), sym::simd_gt) => {
86 fx.bcx.ins().icmp(IntCC::SignedGreaterThan, x_lane, y_lane)
88 (ty::Int(_), sym::simd_ge) => {
89 fx.bcx.ins().icmp(IntCC::SignedGreaterThanOrEqual, x_lane, y_lane)
92 (ty::Float(_), sym::simd_eq) => {
93 fx.bcx.ins().fcmp(FloatCC::Equal, x_lane, y_lane)
95 (ty::Float(_), sym::simd_ne) => {
96 fx.bcx.ins().fcmp(FloatCC::NotEqual, x_lane, y_lane)
98 (ty::Float(_), sym::simd_lt) => {
99 fx.bcx.ins().fcmp(FloatCC::LessThan, x_lane, y_lane)
101 (ty::Float(_), sym::simd_le) => {
102 fx.bcx.ins().fcmp(FloatCC::LessThanOrEqual, x_lane, y_lane)
104 (ty::Float(_), sym::simd_gt) => {
105 fx.bcx.ins().fcmp(FloatCC::GreaterThan, x_lane, y_lane)
107 (ty::Float(_), sym::simd_ge) => {
108 fx.bcx.ins().fcmp(FloatCC::GreaterThanOrEqual, x_lane, y_lane)
// Widen the boolean comparison result to the result lane type (`bint`
// gives 0/1), then negate so a true lane becomes all-ones (-1) — the
// conventional SIMD mask encoding.
114 let ty = fx.clif_type(res_lane_ty).unwrap();
116 let res_lane = fx.bcx.ins().bint(ty, res_lane);
117 fx.bcx.ins().ineg(res_lane)
// Shuffle: picks `n` output lanes out of the 2*lane_count lanes formed by
// conceptually concatenating `x` and `y`, directed by a constant index array.
121 // simd_shuffle32<T, U>(x: T, y: T, idx: [u32; 32]) -> U
122 _ if intrinsic.as_str().starts_with("simd_shuffle") => {
123 let (x, y, idx) = match args {
124 [x, y, idx] => (x, y, idx),
126 bug!("wrong number of args for intrinsic {intrinsic}");
129 let x = codegen_operand(fx, x);
130 let y = codegen_operand(fx, y);
132 if !x.layout().ty.is_simd() {
133 report_simd_type_validation_error(fx, intrinsic, span, x.layout().ty);
137 // If this intrinsic is the older "simd_shuffleN" form, simply parse the integer.
138 // If there is no suffix, use the index array length.
139 let n: u16 = if intrinsic == sym::simd_shuffle {
140 // Make sure this is actually an array, since typeck only checks the length-suffixed
141 // version of this intrinsic.
142 let idx_ty = fx.monomorphize(idx.ty(fx.mir, fx.tcx));
143 match idx_ty.kind() {
144 ty::Array(ty, len) if matches!(ty.kind(), ty::Uint(ty::UintTy::U32)) => len
145 .try_eval_usize(fx.tcx, ty::ParamEnv::reveal_all())
147 span_bug!(span, "could not evaluate shuffle index array length")
// Non-array index type: report an error and trap instead of ICEing.
152 fx.tcx.sess.span_err(
155 "simd_shuffle index must be an array of `u32`, got `{}`",
159 // Prevent verifier error
160 fx.bcx.ins().trap(TrapCode::UnreachableCodeReached);
// Length-suffixed form: the lane count is encoded in the intrinsic name.
165 intrinsic.as_str()["simd_shuffle".len()..].parse().unwrap()
168 assert_eq!(x.layout(), y.layout());
169 let layout = x.layout();
171 let (lane_count, lane_ty) = layout.ty.simd_size_and_type(fx.tcx);
172 let (ret_lane_count, ret_lane_ty) = ret.layout().ty.simd_size_and_type(fx.tcx);
174 assert_eq!(lane_ty, ret_lane_ty);
175 assert_eq!(u64::from(n), ret_lane_count);
// Indices address the concatenation of both inputs, hence twice the count.
177 let total_len = lane_count * 2;
// The index array must be a compile-time constant; read its raw bytes out
// of the const allocation (4 bytes per u32 entry).
180 use rustc_middle::mir::interpret::*;
181 let idx_const = crate::constant::mir_operand_get_const_val(fx, idx)
182 .expect("simd_shuffle* idx not const");
184 let idx_bytes = match idx_const {
185 ConstValue::ByRef { alloc, offset } => {
186 let size = Size::from_bytes(
187 4 * ret_lane_count, /* size_of([u32; ret_lane_count]) */
189 alloc.inner().get_bytes(fx, alloc_range(offset, size)).unwrap()
191 _ => unreachable!("{:?}", idx_const),
// Decode each u32 index using the target's endianness.
196 let i = usize::try_from(i).unwrap();
197 let idx = rustc_middle::mir::interpret::read_target_uint(
198 fx.tcx.data_layout.endian,
199 &idx_bytes[4 * i..4 * i + 4],
201 .expect("read_target_uint");
202 u16::try_from(idx).expect("try_from u32")
204 .collect::<Vec<u16>>()
207 for &idx in &indexes {
208 assert!(u64::from(idx) < total_len, "idx {} out of range 0..{}", idx, total_len);
// Copy each selected lane: indices < lane_count come from `x`, the rest
// from `y` (offset by lane_count).
211 for (out_idx, in_idx) in indexes.into_iter().enumerate() {
212 let in_lane = if u64::from(in_idx) < lane_count {
213 x.value_lane(fx, in_idx.into())
215 y.value_lane(fx, u64::from(in_idx) - lane_count)
217 let out_lane = ret.place_lane(fx, u64::try_from(out_idx).unwrap());
218 out_lane.write_cvalue(fx, in_lane);
// Insert: copy `base` into the return place, then overwrite lane `idx`
// (which must be a constant u32) with `val`.
222 sym::simd_insert => {
223 let (base, idx, val) = match args {
224 [base, idx, val] => (base, idx, val),
226 bug!("wrong number of args for intrinsic {intrinsic}");
229 let base = codegen_operand(fx, base);
230 let val = codegen_operand(fx, val);
// A non-constant index is a hard compile error here (fatal).
233 let idx_const = if let Some(idx_const) =
234 crate::constant::mir_operand_get_const_val(fx, idx)
238 fx.tcx.sess.span_fatal(span, "Index argument for `simd_insert` is not a constant");
242 .try_to_bits(Size::from_bytes(4 /* u32*/))
243 .unwrap_or_else(|| panic!("kind not scalar: {:?}", idx_const));
244 let (lane_count, _lane_ty) = base.layout().ty.simd_size_and_type(fx.tcx);
245 if idx >= lane_count.into() {
246 fx.tcx.sess.span_fatal(
248 &format!("[simd_insert] idx {} >= lane_count {}", idx, lane_count),
252 ret.write_cvalue(fx, base);
253 let ret_lane = ret.place_field(fx, mir::Field::new(idx.try_into().unwrap()));
254 ret_lane.write_cvalue(fx, val);
// Extract: read lane `idx` (constant u32) of `v` into the return place.
257 sym::simd_extract => {
258 let (v, idx) = match args {
259 [v, idx] => (v, idx),
261 bug!("wrong number of args for intrinsic {intrinsic}");
264 let v = codegen_operand(fx, v);
266 if !v.layout().ty.is_simd() {
267 report_simd_type_validation_error(fx, intrinsic, span, v.layout().ty);
// Unlike `simd_insert`, a non-constant index here is only a warning at
// compile time; codegen emits an unconditional runtime trap instead
// (branch-on-true to a trap block, with a dummy fallthrough block so the
// CLIF function stays verifier-valid).
271 let idx_const = if let Some(idx_const) =
272 crate::constant::mir_operand_get_const_val(fx, idx)
276 fx.tcx.sess.span_warn(span, "Index argument for `simd_extract` is not a constant");
277 let trap_block = fx.bcx.create_block();
278 let dummy_block = fx.bcx.create_block();
279 let true_ = fx.bcx.ins().iconst(types::I8, 1);
280 fx.bcx.ins().brnz(true_, trap_block, &[]);
281 fx.bcx.ins().jump(dummy_block, &[]);
282 fx.bcx.switch_to_block(trap_block);
283 crate::trap::trap_unimplemented(
285 "Index argument for `simd_extract` is not a constant",
287 fx.bcx.switch_to_block(dummy_block);
292 .try_to_bits(Size::from_bytes(4 /* u32*/))
293 .unwrap_or_else(|| panic!("kind not scalar: {:?}", idx_const));
294 let (lane_count, _lane_ty) = v.layout().ty.simd_size_and_type(fx.tcx);
295 if idx >= lane_count.into() {
296 fx.tcx.sess.span_fatal(
298 &format!("[simd_extract] idx {} >= lane_count {}", idx, lane_count),
302 let ret_lane = v.value_lane(fx, idx.try_into().unwrap());
303 ret.write_cvalue(fx, ret_lane);
// Lane-wise negation arm (presumably `simd_neg` — TODO confirm; arm header
// elided): integer lanes use `ineg`, float lanes `fneg`.
307 intrinsic_args!(fx, args => (a); intrinsic);
309 if !a.layout().ty.is_simd() {
310 report_simd_type_validation_error(fx, intrinsic, span, a.layout().ty);
318 &|fx, lane_ty, _ret_lane_ty, lane| match lane_ty.kind() {
319 ty::Int(_) => fx.bcx.ins().ineg(lane),
320 ty::Float(_) => fx.bcx.ins().fneg(lane),
// Lane-wise binary arithmetic/bitwise/shift ops. Uint vs Int lanes differ
// only where signedness matters (div/rem, right shift); float rem has no
// CLIF instruction and is lowered to a libcall (the callee name line is
// elided in this view — presumably fmodf/fmod).
336 intrinsic_args!(fx, args => (x, y); intrinsic);
338 // FIXME use vector instructions when possible
339 simd_pair_for_each_lane(fx, x, y, ret, &|fx, lane_ty, _ret_lane_ty, x_lane, y_lane| {
340 match (lane_ty.kind(), intrinsic) {
341 (ty::Uint(_), sym::simd_add) => fx.bcx.ins().iadd(x_lane, y_lane),
342 (ty::Uint(_), sym::simd_sub) => fx.bcx.ins().isub(x_lane, y_lane),
343 (ty::Uint(_), sym::simd_mul) => fx.bcx.ins().imul(x_lane, y_lane),
344 (ty::Uint(_), sym::simd_div) => fx.bcx.ins().udiv(x_lane, y_lane),
345 (ty::Uint(_), sym::simd_rem) => fx.bcx.ins().urem(x_lane, y_lane),
347 (ty::Int(_), sym::simd_add) => fx.bcx.ins().iadd(x_lane, y_lane),
348 (ty::Int(_), sym::simd_sub) => fx.bcx.ins().isub(x_lane, y_lane),
349 (ty::Int(_), sym::simd_mul) => fx.bcx.ins().imul(x_lane, y_lane),
350 (ty::Int(_), sym::simd_div) => fx.bcx.ins().sdiv(x_lane, y_lane),
351 (ty::Int(_), sym::simd_rem) => fx.bcx.ins().srem(x_lane, y_lane),
353 (ty::Float(_), sym::simd_add) => fx.bcx.ins().fadd(x_lane, y_lane),
354 (ty::Float(_), sym::simd_sub) => fx.bcx.ins().fsub(x_lane, y_lane),
355 (ty::Float(_), sym::simd_mul) => fx.bcx.ins().fmul(x_lane, y_lane),
356 (ty::Float(_), sym::simd_div) => fx.bcx.ins().fdiv(x_lane, y_lane),
357 (ty::Float(FloatTy::F32), sym::simd_rem) => fx.lib_call(
359 vec![AbiParam::new(types::F32), AbiParam::new(types::F32)],
360 vec![AbiParam::new(types::F32)],
363 (ty::Float(FloatTy::F64), sym::simd_rem) => fx.lib_call(
365 vec![AbiParam::new(types::F64), AbiParam::new(types::F64)],
366 vec![AbiParam::new(types::F64)],
370 (ty::Uint(_), sym::simd_shl) => fx.bcx.ins().ishl(x_lane, y_lane),
371 (ty::Uint(_), sym::simd_shr) => fx.bcx.ins().ushr(x_lane, y_lane),
372 (ty::Uint(_), sym::simd_and) => fx.bcx.ins().band(x_lane, y_lane),
373 (ty::Uint(_), sym::simd_or) => fx.bcx.ins().bor(x_lane, y_lane),
374 (ty::Uint(_), sym::simd_xor) => fx.bcx.ins().bxor(x_lane, y_lane),
376 (ty::Int(_), sym::simd_shl) => fx.bcx.ins().ishl(x_lane, y_lane),
377 (ty::Int(_), sym::simd_shr) => fx.bcx.ins().sshr(x_lane, y_lane),
378 (ty::Int(_), sym::simd_and) => fx.bcx.ins().band(x_lane, y_lane),
379 (ty::Int(_), sym::simd_or) => fx.bcx.ins().bor(x_lane, y_lane),
380 (ty::Int(_), sym::simd_xor) => fx.bcx.ins().bxor(x_lane, y_lane),
// Fused multiply-add arm (arm header elided — presumably `simd_fma`):
// ret[lane] = fma(a[lane], b[lane], c[lane]) via the CLIF `fma` instruction.
388 intrinsic_args!(fx, args => (a, b, c); intrinsic);
390 if !a.layout().ty.is_simd() {
391 report_simd_type_validation_error(fx, intrinsic, span, a.layout().ty);
// All three operands and the result must share one SIMD layout.
394 assert_eq!(a.layout(), b.layout());
395 assert_eq!(a.layout(), c.layout());
396 assert_eq!(a.layout(), ret.layout());
398 let layout = a.layout();
399 let (lane_count, lane_ty) = layout.ty.simd_size_and_type(fx.tcx);
400 let res_lane_layout = fx.layout_of(lane_ty);
402 for lane in 0..lane_count {
403 let a_lane = a.value_lane(fx, lane).load_scalar(fx);
404 let b_lane = b.value_lane(fx, lane).load_scalar(fx);
405 let c_lane = c.value_lane(fx, lane).load_scalar(fx);
407 let res_lane = fx.bcx.ins().fma(a_lane, b_lane, c_lane);
408 let res_lane = CValue::by_val(res_lane, res_lane_layout);
410 ret.place_lane(fx, lane).write_cvalue(fx, res_lane);
// Lane-wise float min/max, delegated to the crate's scalar float min/max
// helpers (semantics — e.g. NaN handling — are defined there).
414 sym::simd_fmin | sym::simd_fmax => {
415 intrinsic_args!(fx, args => (x, y); intrinsic);
417 if !x.layout().ty.is_simd() {
418 report_simd_type_validation_error(fx, intrinsic, span, x.layout().ty);
422 // FIXME use vector instructions when possible
423 simd_pair_for_each_lane(fx, x, y, ret, &|fx, lane_ty, _ret_lane_ty, x_lane, y_lane| {
424 match lane_ty.kind() {
426 _ => unreachable!("{:?}", lane_ty),
429 sym::simd_fmin => crate::num::codegen_float_min(fx, x_lane, y_lane),
430 sym::simd_fmax => crate::num::codegen_float_max(fx, x_lane, y_lane),
// Lane-wise unary float intrinsics lowered to libm-style libcalls, with the
// f32/f64 variant chosen per lane type (callee name lines elided here).
437 intrinsic_args!(fx, args => (a); intrinsic);
439 if !a.layout().ty.is_simd() {
440 report_simd_type_validation_error(fx, intrinsic, span, a.layout().ty);
448 &|fx, lane_ty, _ret_lane_ty, lane| match lane_ty.kind() {
449 ty::Float(FloatTy::F32) => fx.lib_call(
451 vec![AbiParam::new(types::F32)],
452 vec![AbiParam::new(types::F32)],
455 ty::Float(FloatTy::F64) => fx.lib_call(
457 vec![AbiParam::new(types::F64)],
458 vec![AbiParam::new(types::F64)],
461 _ => unreachable!("{:?}", lane_ty),
// Lane-wise unary float ops that map directly onto CLIF instructions.
466 sym::simd_fabs | sym::simd_fsqrt | sym::simd_ceil | sym::simd_floor | sym::simd_trunc => {
467 intrinsic_args!(fx, args => (a); intrinsic);
469 if !a.layout().ty.is_simd() {
470 report_simd_type_validation_error(fx, intrinsic, span, a.layout().ty);
474 simd_for_each_lane(fx, a, ret, &|fx, lane_ty, _ret_lane_ty, lane| {
475 match lane_ty.kind() {
477 _ => unreachable!("{:?}", lane_ty),
480 sym::simd_fabs => fx.bcx.ins().fabs(lane),
481 sym::simd_fsqrt => fx.bcx.ins().sqrt(lane),
482 sym::simd_ceil => fx.bcx.ins().ceil(lane),
483 sym::simd_floor => fx.bcx.ins().floor(lane),
484 sym::simd_trunc => fx.bcx.ins().trunc(lane),
// Reductions. Ordered and unordered add/mul are lowered identically here,
// folding lanes with the scalar accumulator `acc`.
490 sym::simd_reduce_add_ordered | sym::simd_reduce_add_unordered => {
491 intrinsic_args!(fx, args => (v, acc); intrinsic);
492 let acc = acc.load_scalar(fx);
494 // FIXME there must be no acc param for integer vectors
495 if !v.layout().ty.is_simd() {
496 report_simd_type_validation_error(fx, intrinsic, span, v.layout().ty);
500 simd_reduce(fx, v, Some(acc), ret, &|fx, lane_ty, a, b| {
501 if lane_ty.is_floating_point() {
502 fx.bcx.ins().fadd(a, b)
504 fx.bcx.ins().iadd(a, b)
509 sym::simd_reduce_mul_ordered | sym::simd_reduce_mul_unordered => {
510 intrinsic_args!(fx, args => (v, acc); intrinsic);
511 let acc = acc.load_scalar(fx);
513 // FIXME there must be no acc param for integer vectors
514 if !v.layout().ty.is_simd() {
515 report_simd_type_validation_error(fx, intrinsic, span, v.layout().ty);
519 simd_reduce(fx, v, Some(acc), ret, &|fx, lane_ty, a, b| {
520 if lane_ty.is_floating_point() {
521 fx.bcx.ins().fmul(a, b)
523 fx.bcx.ins().imul(a, b)
// Boolean reductions: all = AND of mask lanes, any = OR of mask lanes.
528 sym::simd_reduce_all => {
529 intrinsic_args!(fx, args => (v); intrinsic);
531 if !v.layout().ty.is_simd() {
532 report_simd_type_validation_error(fx, intrinsic, span, v.layout().ty);
536 simd_reduce_bool(fx, v, ret, &|fx, a, b| fx.bcx.ins().band(a, b));
539 sym::simd_reduce_any => {
540 intrinsic_args!(fx, args => (v); intrinsic);
542 if !v.layout().ty.is_simd() {
543 report_simd_type_validation_error(fx, intrinsic, span, v.layout().ty);
547 simd_reduce_bool(fx, v, ret, &|fx, a, b| fx.bcx.ins().bor(a, b));
// Bitwise reductions fold lanes without an accumulator (None).
550 sym::simd_reduce_and => {
551 intrinsic_args!(fx, args => (v); intrinsic);
553 if !v.layout().ty.is_simd() {
554 report_simd_type_validation_error(fx, intrinsic, span, v.layout().ty);
558 simd_reduce(fx, v, None, ret, &|fx, _ty, a, b| fx.bcx.ins().band(a, b));
561 sym::simd_reduce_or => {
562 intrinsic_args!(fx, args => (v); intrinsic);
564 if !v.layout().ty.is_simd() {
565 report_simd_type_validation_error(fx, intrinsic, span, v.layout().ty);
569 simd_reduce(fx, v, None, ret, &|fx, _ty, a, b| fx.bcx.ins().bor(a, b));
572 sym::simd_reduce_xor => {
573 intrinsic_args!(fx, args => (v); intrinsic);
575 if !v.layout().ty.is_simd() {
576 report_simd_type_validation_error(fx, intrinsic, span, v.layout().ty);
580 simd_reduce(fx, v, None, ret, &|fx, _ty, a, b| fx.bcx.ins().bxor(a, b));
// Min/max reductions: integers via compare+select with the appropriate
// signedness; floats delegate to the scalar float min/max helpers (early
// `return` skips the integer `select` below).
583 sym::simd_reduce_min => {
584 intrinsic_args!(fx, args => (v); intrinsic);
586 if !v.layout().ty.is_simd() {
587 report_simd_type_validation_error(fx, intrinsic, span, v.layout().ty);
591 simd_reduce(fx, v, None, ret, &|fx, ty, a, b| {
592 let lt = match ty.kind() {
593 ty::Int(_) => fx.bcx.ins().icmp(IntCC::SignedLessThan, a, b),
594 ty::Uint(_) => fx.bcx.ins().icmp(IntCC::UnsignedLessThan, a, b),
595 ty::Float(_) => return crate::num::codegen_float_min(fx, a, b),
598 fx.bcx.ins().select(lt, a, b)
602 sym::simd_reduce_max => {
603 intrinsic_args!(fx, args => (v); intrinsic);
605 if !v.layout().ty.is_simd() {
606 report_simd_type_validation_error(fx, intrinsic, span, v.layout().ty);
610 simd_reduce(fx, v, None, ret, &|fx, ty, a, b| {
611 let gt = match ty.kind() {
612 ty::Int(_) => fx.bcx.ins().icmp(IntCC::SignedGreaterThan, a, b),
613 ty::Uint(_) => fx.bcx.ins().icmp(IntCC::UnsignedGreaterThan, a, b),
614 ty::Float(_) => return crate::num::codegen_float_max(fx, a, b),
617 fx.bcx.ins().select(gt, a, b)
// Select: per-lane `m ? a : b`. The mask lane is compared against 0 and the
// select operands are swapped accordingly (mask == 0 picks b_lane).
621 sym::simd_select => {
622 intrinsic_args!(fx, args => (m, a, b); intrinsic);
624 if !m.layout().ty.is_simd() {
625 report_simd_type_validation_error(fx, intrinsic, span, m.layout().ty);
628 if !a.layout().ty.is_simd() {
629 report_simd_type_validation_error(fx, intrinsic, span, a.layout().ty);
632 assert_eq!(a.layout(), b.layout());
634 let (lane_count, lane_ty) = a.layout().ty.simd_size_and_type(fx.tcx);
635 let lane_layout = fx.layout_of(lane_ty);
637 for lane in 0..lane_count {
638 let m_lane = m.value_lane(fx, lane).load_scalar(fx);
639 let a_lane = a.value_lane(fx, lane).load_scalar(fx);
640 let b_lane = b.value_lane(fx, lane).load_scalar(fx);
642 let m_lane = fx.bcx.ins().icmp_imm(IntCC::Equal, m_lane, 0);
644 CValue::by_val(fx.bcx.ins().select(m_lane, b_lane, a_lane), lane_layout);
646 ret.place_lane(fx, lane).write_cvalue(fx, res_lane);
// Fallback: any SIMD intrinsic not handled above is a fatal compile error.
655 fx.tcx.sess.span_fatal(span, &format!("Unknown SIMD intrinsic {}", intrinsic));