1 //! Codegen `extern "platform-intrinsic"` intrinsics.
3 use rustc_middle::ty::subst::SubstsRef;
4 use rustc_span::Symbol;
5 use rustc_target::abi::Endian;
// Emits an "invalid monomorphization" error for a SIMD intrinsic that was
// instantiated with a non-SIMD operand type, then emits an unreachable trap
// so the half-built function body still has a terminator and passes the
// Cranelift verifier.
// NOTE(review): this excerpt is elided — the remaining parameters (the error
// message references `intrinsic`, `span` and `ty`) and the closing brace are
// not visible here.
10 fn report_simd_type_validation_error(
11 fx: &mut FunctionCx<'_, '_, '_>,
16 fx.tcx.sess.span_err(span, &format!("invalid monomorphization of `{}` intrinsic: expected SIMD input type, found non-SIMD `{}`", intrinsic, ty));
17 // Prevent verifier error
18 fx.bcx.ins().trap(TrapCode::UnreachableCodeReached);
// Lowers a call to a `platform-intrinsic` SIMD intrinsic into Cranelift IR.
// Each `match` arm below handles one intrinsic (or a family of related
// intrinsics). Operands whose monomorphized type is not a SIMD vector are
// rejected via `report_simd_type_validation_error`; an intrinsic no arm
// recognizes falls through to the `span_fatal` at the bottom.
// NOTE(review): this excerpt is elided — arm heads, closing braces and some
// expressions are missing. The comments added below describe only what the
// visible lines establish; elided spots are marked explicitly.
21 pub(super) fn codegen_simd_intrinsic_call<'tcx>(
22 fx: &mut FunctionCx<'_, '_, 'tcx>,
24 _substs: SubstsRef<'tcx>,
25 args: &[mir::Operand<'tcx>],
// `simd_as` / `simd_cast`: lane-wise numeric conversion. The signedness of
// the source and destination lane types (via `type_sign`) selects the
// concrete int/float cast performed by `clif_int_or_float_cast`.
30 sym::simd_as | sym::simd_cast => {
31 intrinsic_args!(fx, args => (a); intrinsic);
33 if !a.layout().ty.is_simd() {
34 report_simd_type_validation_error(fx, intrinsic, span, a.layout().ty);
38 simd_for_each_lane(fx, a, ret, &|fx, lane_ty, ret_lane_ty, lane| {
39 let ret_lane_clif_ty = fx.clif_type(ret_lane_ty).unwrap();
41 let from_signed = type_sign(lane_ty);
42 let to_signed = type_sign(ret_lane_ty);
44 clif_int_or_float_cast(fx, lane, from_signed, ret_lane_clif_ty, to_signed)
// Lane-wise comparisons: `icmp` with unsigned/signed condition codes for
// `Uint`/`Int` lanes, `fcmp` for float lanes. The 1-bit comparison result is
// widened with `bint` and negated, so each result lane becomes all-ones
// (true) or all-zeros (false).
48 sym::simd_eq | sym::simd_ne | sym::simd_lt | sym::simd_le | sym::simd_gt | sym::simd_ge => {
49 intrinsic_args!(fx, args => (x, y); intrinsic);
51 if !x.layout().ty.is_simd() {
52 report_simd_type_validation_error(fx, intrinsic, span, x.layout().ty);
56 // FIXME use vector instructions when possible
57 simd_pair_for_each_lane(fx, x, y, ret, &|fx, lane_ty, res_lane_ty, x_lane, y_lane| {
58 let res_lane = match (lane_ty.kind(), intrinsic) {
59 (ty::Uint(_), sym::simd_eq) => fx.bcx.ins().icmp(IntCC::Equal, x_lane, y_lane),
60 (ty::Uint(_), sym::simd_ne) => {
61 fx.bcx.ins().icmp(IntCC::NotEqual, x_lane, y_lane)
63 (ty::Uint(_), sym::simd_lt) => {
64 fx.bcx.ins().icmp(IntCC::UnsignedLessThan, x_lane, y_lane)
66 (ty::Uint(_), sym::simd_le) => {
67 fx.bcx.ins().icmp(IntCC::UnsignedLessThanOrEqual, x_lane, y_lane)
69 (ty::Uint(_), sym::simd_gt) => {
70 fx.bcx.ins().icmp(IntCC::UnsignedGreaterThan, x_lane, y_lane)
72 (ty::Uint(_), sym::simd_ge) => {
73 fx.bcx.ins().icmp(IntCC::UnsignedGreaterThanOrEqual, x_lane, y_lane)
76 (ty::Int(_), sym::simd_eq) => fx.bcx.ins().icmp(IntCC::Equal, x_lane, y_lane),
77 (ty::Int(_), sym::simd_ne) => {
78 fx.bcx.ins().icmp(IntCC::NotEqual, x_lane, y_lane)
80 (ty::Int(_), sym::simd_lt) => {
81 fx.bcx.ins().icmp(IntCC::SignedLessThan, x_lane, y_lane)
83 (ty::Int(_), sym::simd_le) => {
84 fx.bcx.ins().icmp(IntCC::SignedLessThanOrEqual, x_lane, y_lane)
86 (ty::Int(_), sym::simd_gt) => {
87 fx.bcx.ins().icmp(IntCC::SignedGreaterThan, x_lane, y_lane)
89 (ty::Int(_), sym::simd_ge) => {
90 fx.bcx.ins().icmp(IntCC::SignedGreaterThanOrEqual, x_lane, y_lane)
93 (ty::Float(_), sym::simd_eq) => {
94 fx.bcx.ins().fcmp(FloatCC::Equal, x_lane, y_lane)
96 (ty::Float(_), sym::simd_ne) => {
97 fx.bcx.ins().fcmp(FloatCC::NotEqual, x_lane, y_lane)
99 (ty::Float(_), sym::simd_lt) => {
100 fx.bcx.ins().fcmp(FloatCC::LessThan, x_lane, y_lane)
102 (ty::Float(_), sym::simd_le) => {
103 fx.bcx.ins().fcmp(FloatCC::LessThanOrEqual, x_lane, y_lane)
105 (ty::Float(_), sym::simd_gt) => {
106 fx.bcx.ins().fcmp(FloatCC::GreaterThan, x_lane, y_lane)
108 (ty::Float(_), sym::simd_ge) => {
109 fx.bcx.ins().fcmp(FloatCC::GreaterThanOrEqual, x_lane, y_lane)
// Widen the 1-bit flag to the result lane type, then negate (0 -> 0,
// 1 -> all-ones) to form the conventional SIMD boolean mask lane.
115 let ty = fx.clif_type(res_lane_ty).unwrap();
117 let res_lane = fx.bcx.ins().bint(ty, res_lane);
118 fx.bcx.ins().ineg(res_lane)
// `simd_shuffle*`: build the result by selecting lanes out of the
// concatenation of `x` and `y` according to a compile-time-constant `u32`
// index array. Handles both the suffixed `simd_shuffleN` form and the
// unsuffixed `simd_shuffle` form (length taken from the index array type).
122 // simd_shuffle32<T, U>(x: T, y: T, idx: [u32; 32]) -> U
123 _ if intrinsic.as_str().starts_with("simd_shuffle") => {
124 let (x, y, idx) = match args {
125 [x, y, idx] => (x, y, idx),
127 bug!("wrong number of args for intrinsic {intrinsic}");
130 let x = codegen_operand(fx, x);
131 let y = codegen_operand(fx, y);
133 if !x.layout().ty.is_simd() {
134 report_simd_type_validation_error(fx, intrinsic, span, x.layout().ty);
138 // If this intrinsic is the older "simd_shuffleN" form, simply parse the integer.
139 // If there is no suffix, use the index array length.
140 let n: u16 = if intrinsic == sym::simd_shuffle {
141 // Make sure this is actually an array, since typeck only checks the length-suffixed
142 // version of this intrinsic.
143 let idx_ty = fx.monomorphize(idx.ty(fx.mir, fx.tcx));
144 match idx_ty.kind() {
145 ty::Array(ty, len) if matches!(ty.kind(), ty::Uint(ty::UintTy::U32)) => len
146 .try_eval_usize(fx.tcx, ty::ParamEnv::reveal_all())
148 span_bug!(span, "could not evaluate shuffle index array length")
153 fx.tcx.sess.span_err(
156 "simd_shuffle index must be an array of `u32`, got `{}`",
160 // Prevent verifier error
161 fx.bcx.ins().trap(TrapCode::UnreachableCodeReached);
166 // FIXME remove this case
167 intrinsic.as_str()["simd_shuffle".len()..].parse().unwrap()
170 assert_eq!(x.layout(), y.layout());
171 let layout = x.layout();
173 let (lane_count, lane_ty) = layout.ty.simd_size_and_type(fx.tcx);
174 let (ret_lane_count, ret_lane_ty) = ret.layout().ty.simd_size_and_type(fx.tcx);
176 assert_eq!(lane_ty, ret_lane_ty);
177 assert_eq!(u64::from(n), ret_lane_count);
// Valid shuffle indices address the virtual concatenation of both inputs.
179 let total_len = lane_count * 2;
// Decode the constant index array from its allocation, 4 bytes (one u32)
// per index, respecting the target's endianness.
182 use rustc_middle::mir::interpret::*;
183 let idx_const = crate::constant::mir_operand_get_const_val(fx, idx)
184 .expect("simd_shuffle* idx not const");
186 let idx_bytes = match idx_const {
187 ConstValue::ByRef { alloc, offset } => {
188 let size = Size::from_bytes(
189 4 * ret_lane_count, /* size_of([u32; ret_lane_count]) */
193 .get_bytes_strip_provenance(fx, alloc_range(offset, size))
196 _ => unreachable!("{:?}", idx_const),
201 let i = usize::try_from(i).unwrap();
202 let idx = rustc_middle::mir::interpret::read_target_uint(
203 fx.tcx.data_layout.endian,
204 &idx_bytes[4 * i..4 * i + 4],
206 .expect("read_target_uint");
207 u16::try_from(idx).expect("try_from u32")
209 .collect::<Vec<u16>>()
212 for &idx in &indexes {
213 assert!(u64::from(idx) < total_len, "idx {} out of range 0..{}", idx, total_len);
// Indices below `lane_count` select from `x`, the rest from `y`.
216 for (out_idx, in_idx) in indexes.into_iter().enumerate() {
217 let in_lane = if u64::from(in_idx) < lane_count {
218 x.value_lane(fx, in_idx.into())
220 y.value_lane(fx, u64::from(in_idx) - lane_count)
222 let out_lane = ret.place_lane(fx, u64::try_from(out_idx).unwrap());
223 out_lane.write_cvalue(fx, in_lane);
// `simd_insert`: copy `base` into the return place, then overwrite lane
// `idx` (which must be a compile-time constant, checked below) with `val`.
227 sym::simd_insert => {
228 let (base, idx, val) = match args {
229 [base, idx, val] => (base, idx, val),
231 bug!("wrong number of args for intrinsic {intrinsic}");
234 let base = codegen_operand(fx, base);
235 let val = codegen_operand(fx, val);
// A non-constant index is a hard (fatal) error for `simd_insert`.
238 let idx_const = if let Some(idx_const) =
239 crate::constant::mir_operand_get_const_val(fx, idx)
243 fx.tcx.sess.span_fatal(span, "Index argument for `simd_insert` is not a constant");
247 .try_to_bits(Size::from_bytes(4 /* u32*/))
248 .unwrap_or_else(|| panic!("kind not scalar: {:?}", idx_const));
249 let (lane_count, _lane_ty) = base.layout().ty.simd_size_and_type(fx.tcx);
250 if idx >= lane_count.into() {
251 fx.tcx.sess.span_fatal(
253 &format!("[simd_insert] idx {} >= lane_count {}", idx, lane_count),
257 ret.write_cvalue(fx, base);
258 let ret_lane = ret.place_field(fx, mir::Field::new(idx.try_into().unwrap()));
259 ret_lane.write_cvalue(fx, val);
// `simd_extract`: read lane `idx` out of vector `v`. Unlike `simd_insert`,
// a non-constant index is only a warning here; the generated code then
// unconditionally branches into a trap block at runtime.
262 sym::simd_extract => {
263 let (v, idx) = match args {
264 [v, idx] => (v, idx),
266 bug!("wrong number of args for intrinsic {intrinsic}");
269 let v = codegen_operand(fx, v);
271 if !v.layout().ty.is_simd() {
272 report_simd_type_validation_error(fx, intrinsic, span, v.layout().ty);
276 let idx_const = if let Some(idx_const) =
277 crate::constant::mir_operand_get_const_val(fx, idx)
281 fx.tcx.sess.span_warn(span, "Index argument for `simd_extract` is not a constant");
// Emit an always-taken branch to a trap block; `dummy_block` exists
// only so the current block has a well-formed fallthrough successor.
282 let trap_block = fx.bcx.create_block();
283 let dummy_block = fx.bcx.create_block();
284 let true_ = fx.bcx.ins().iconst(types::I8, 1);
285 fx.bcx.ins().brnz(true_, trap_block, &[]);
286 fx.bcx.ins().jump(dummy_block, &[]);
287 fx.bcx.switch_to_block(trap_block);
288 crate::trap::trap_unimplemented(
290 "Index argument for `simd_extract` is not a constant",
292 fx.bcx.switch_to_block(dummy_block);
297 .try_to_bits(Size::from_bytes(4 /* u32*/))
298 .unwrap_or_else(|| panic!("kind not scalar: {:?}", idx_const));
299 let (lane_count, _lane_ty) = v.layout().ty.simd_size_and_type(fx.tcx);
300 if idx >= lane_count.into() {
301 fx.tcx.sess.span_fatal(
303 &format!("[simd_extract] idx {} >= lane_count {}", idx, lane_count),
307 let ret_lane = v.value_lane(fx, idx.try_into().unwrap());
308 ret.write_cvalue(fx, ret_lane);
// Lane-wise negation: `ineg` for integer lanes, `fneg` for float lanes.
// (The arm head — presumably `sym::simd_neg` — is elided in this excerpt.)
312 intrinsic_args!(fx, args => (a); intrinsic);
314 if !a.layout().ty.is_simd() {
315 report_simd_type_validation_error(fx, intrinsic, span, a.layout().ty);
323 &|fx, lane_ty, _ret_lane_ty, lane| match lane_ty.kind() {
324 ty::Int(_) => fx.bcx.ins().ineg(lane),
325 ty::Float(_) => fx.bcx.ins().fneg(lane),
// Lane-wise binary arithmetic / bitwise / shift intrinsics (arm head
// elided). Integer div/rem/shr pick the signed or unsigned Cranelift
// instruction based on the lane type; float `simd_rem` has no Cranelift
// instruction and is lowered to a per-lane library call instead (the callee
// name line is elided here).
341 intrinsic_args!(fx, args => (x, y); intrinsic);
343 // FIXME use vector instructions when possible
344 simd_pair_for_each_lane(fx, x, y, ret, &|fx, lane_ty, _ret_lane_ty, x_lane, y_lane| {
345 match (lane_ty.kind(), intrinsic) {
346 (ty::Uint(_), sym::simd_add) => fx.bcx.ins().iadd(x_lane, y_lane),
347 (ty::Uint(_), sym::simd_sub) => fx.bcx.ins().isub(x_lane, y_lane),
348 (ty::Uint(_), sym::simd_mul) => fx.bcx.ins().imul(x_lane, y_lane),
349 (ty::Uint(_), sym::simd_div) => fx.bcx.ins().udiv(x_lane, y_lane),
350 (ty::Uint(_), sym::simd_rem) => fx.bcx.ins().urem(x_lane, y_lane),
352 (ty::Int(_), sym::simd_add) => fx.bcx.ins().iadd(x_lane, y_lane),
353 (ty::Int(_), sym::simd_sub) => fx.bcx.ins().isub(x_lane, y_lane),
354 (ty::Int(_), sym::simd_mul) => fx.bcx.ins().imul(x_lane, y_lane),
355 (ty::Int(_), sym::simd_div) => fx.bcx.ins().sdiv(x_lane, y_lane),
356 (ty::Int(_), sym::simd_rem) => fx.bcx.ins().srem(x_lane, y_lane),
358 (ty::Float(_), sym::simd_add) => fx.bcx.ins().fadd(x_lane, y_lane),
359 (ty::Float(_), sym::simd_sub) => fx.bcx.ins().fsub(x_lane, y_lane),
360 (ty::Float(_), sym::simd_mul) => fx.bcx.ins().fmul(x_lane, y_lane),
361 (ty::Float(_), sym::simd_div) => fx.bcx.ins().fdiv(x_lane, y_lane),
362 (ty::Float(FloatTy::F32), sym::simd_rem) => fx.lib_call(
364 vec![AbiParam::new(types::F32), AbiParam::new(types::F32)],
365 vec![AbiParam::new(types::F32)],
368 (ty::Float(FloatTy::F64), sym::simd_rem) => fx.lib_call(
370 vec![AbiParam::new(types::F64), AbiParam::new(types::F64)],
371 vec![AbiParam::new(types::F64)],
375 (ty::Uint(_), sym::simd_shl) => fx.bcx.ins().ishl(x_lane, y_lane),
376 (ty::Uint(_), sym::simd_shr) => fx.bcx.ins().ushr(x_lane, y_lane),
377 (ty::Uint(_), sym::simd_and) => fx.bcx.ins().band(x_lane, y_lane),
378 (ty::Uint(_), sym::simd_or) => fx.bcx.ins().bor(x_lane, y_lane),
379 (ty::Uint(_), sym::simd_xor) => fx.bcx.ins().bxor(x_lane, y_lane),
381 (ty::Int(_), sym::simd_shl) => fx.bcx.ins().ishl(x_lane, y_lane),
382 (ty::Int(_), sym::simd_shr) => fx.bcx.ins().sshr(x_lane, y_lane),
383 (ty::Int(_), sym::simd_and) => fx.bcx.ins().band(x_lane, y_lane),
384 (ty::Int(_), sym::simd_or) => fx.bcx.ins().bor(x_lane, y_lane),
385 (ty::Int(_), sym::simd_xor) => fx.bcx.ins().bxor(x_lane, y_lane),
// Fused multiply-add (arm head elided): per lane, computes `a * b + c` via
// the Cranelift `fma` instruction; all four layouts must agree.
393 intrinsic_args!(fx, args => (a, b, c); intrinsic);
395 if !a.layout().ty.is_simd() {
396 report_simd_type_validation_error(fx, intrinsic, span, a.layout().ty);
399 assert_eq!(a.layout(), b.layout());
400 assert_eq!(a.layout(), c.layout());
401 assert_eq!(a.layout(), ret.layout());
403 let layout = a.layout();
404 let (lane_count, lane_ty) = layout.ty.simd_size_and_type(fx.tcx);
405 let res_lane_layout = fx.layout_of(lane_ty);
407 for lane in 0..lane_count {
408 let a_lane = a.value_lane(fx, lane).load_scalar(fx);
409 let b_lane = b.value_lane(fx, lane).load_scalar(fx);
410 let c_lane = c.value_lane(fx, lane).load_scalar(fx);
412 let res_lane = fx.bcx.ins().fma(a_lane, b_lane, c_lane);
413 let res_lane = CValue::by_val(res_lane, res_lane_layout);
415 ret.place_lane(fx, lane).write_cvalue(fx, res_lane);
// `simd_fmin` / `simd_fmax`: lane-wise float min/max delegated to the shared
// helpers in `crate::num` (helper bodies not visible in this excerpt).
419 sym::simd_fmin | sym::simd_fmax => {
420 intrinsic_args!(fx, args => (x, y); intrinsic);
422 if !x.layout().ty.is_simd() {
423 report_simd_type_validation_error(fx, intrinsic, span, x.layout().ty);
427 // FIXME use vector instructions when possible
428 simd_pair_for_each_lane(fx, x, y, ret, &|fx, lane_ty, _ret_lane_ty, x_lane, y_lane| {
429 match lane_ty.kind() {
431 _ => unreachable!("{:?}", lane_ty),
434 sym::simd_fmin => crate::num::codegen_float_min(fx, x_lane, y_lane),
435 sym::simd_fmax => crate::num::codegen_float_max(fx, x_lane, y_lane),
// Lane-wise unary float libcall intrinsic (arm head and callee-name lines
// elided): dispatches an f32 or f64 library call per lane by lane type.
442 intrinsic_args!(fx, args => (a); intrinsic);
444 if !a.layout().ty.is_simd() {
445 report_simd_type_validation_error(fx, intrinsic, span, a.layout().ty);
453 &|fx, lane_ty, _ret_lane_ty, lane| match lane_ty.kind() {
454 ty::Float(FloatTy::F32) => fx.lib_call(
456 vec![AbiParam::new(types::F32)],
457 vec![AbiParam::new(types::F32)],
460 ty::Float(FloatTy::F64) => fx.lib_call(
462 vec![AbiParam::new(types::F64)],
463 vec![AbiParam::new(types::F64)],
466 _ => unreachable!("{:?}", lane_ty),
// Lane-wise float ops with direct Cranelift instructions: abs, sqrt, ceil,
// floor, trunc.
471 sym::simd_fabs | sym::simd_fsqrt | sym::simd_ceil | sym::simd_floor | sym::simd_trunc => {
472 intrinsic_args!(fx, args => (a); intrinsic);
474 if !a.layout().ty.is_simd() {
475 report_simd_type_validation_error(fx, intrinsic, span, a.layout().ty);
479 simd_for_each_lane(fx, a, ret, &|fx, lane_ty, _ret_lane_ty, lane| {
480 match lane_ty.kind() {
482 _ => unreachable!("{:?}", lane_ty),
485 sym::simd_fabs => fx.bcx.ins().fabs(lane),
486 sym::simd_fsqrt => fx.bcx.ins().sqrt(lane),
487 sym::simd_ceil => fx.bcx.ins().ceil(lane),
488 sym::simd_floor => fx.bcx.ins().floor(lane),
489 sym::simd_trunc => fx.bcx.ins().trunc(lane),
// Horizontal reductions. The ordered/unordered add and mul variants take an
// explicit accumulator operand; the bool/bitwise/min/max reductions below
// fold the lanes directly with no accumulator.
495 sym::simd_reduce_add_ordered | sym::simd_reduce_add_unordered => {
496 intrinsic_args!(fx, args => (v, acc); intrinsic);
497 let acc = acc.load_scalar(fx);
499 // FIXME there must be no acc param for integer vectors
500 if !v.layout().ty.is_simd() {
501 report_simd_type_validation_error(fx, intrinsic, span, v.layout().ty);
505 simd_reduce(fx, v, Some(acc), ret, &|fx, lane_ty, a, b| {
506 if lane_ty.is_floating_point() {
507 fx.bcx.ins().fadd(a, b)
509 fx.bcx.ins().iadd(a, b)
514 sym::simd_reduce_mul_ordered | sym::simd_reduce_mul_unordered => {
515 intrinsic_args!(fx, args => (v, acc); intrinsic);
516 let acc = acc.load_scalar(fx);
518 // FIXME there must be no acc param for integer vectors
519 if !v.layout().ty.is_simd() {
520 report_simd_type_validation_error(fx, intrinsic, span, v.layout().ty);
524 simd_reduce(fx, v, Some(acc), ret, &|fx, lane_ty, a, b| {
525 if lane_ty.is_floating_point() {
526 fx.bcx.ins().fmul(a, b)
528 fx.bcx.ins().imul(a, b)
// Boolean reductions: `all` folds lanes with AND, `any` with OR.
533 sym::simd_reduce_all => {
534 intrinsic_args!(fx, args => (v); intrinsic);
536 if !v.layout().ty.is_simd() {
537 report_simd_type_validation_error(fx, intrinsic, span, v.layout().ty);
541 simd_reduce_bool(fx, v, ret, &|fx, a, b| fx.bcx.ins().band(a, b));
544 sym::simd_reduce_any => {
545 intrinsic_args!(fx, args => (v); intrinsic);
547 if !v.layout().ty.is_simd() {
548 report_simd_type_validation_error(fx, intrinsic, span, v.layout().ty);
552 simd_reduce_bool(fx, v, ret, &|fx, a, b| fx.bcx.ins().bor(a, b));
// Bitwise reductions over integer lanes: AND / OR / XOR.
555 sym::simd_reduce_and => {
556 intrinsic_args!(fx, args => (v); intrinsic);
558 if !v.layout().ty.is_simd() {
559 report_simd_type_validation_error(fx, intrinsic, span, v.layout().ty);
563 simd_reduce(fx, v, None, ret, &|fx, _ty, a, b| fx.bcx.ins().band(a, b));
566 sym::simd_reduce_or => {
567 intrinsic_args!(fx, args => (v); intrinsic);
569 if !v.layout().ty.is_simd() {
570 report_simd_type_validation_error(fx, intrinsic, span, v.layout().ty);
574 simd_reduce(fx, v, None, ret, &|fx, _ty, a, b| fx.bcx.ins().bor(a, b));
577 sym::simd_reduce_xor => {
578 intrinsic_args!(fx, args => (v); intrinsic);
580 if !v.layout().ty.is_simd() {
581 report_simd_type_validation_error(fx, intrinsic, span, v.layout().ty);
585 simd_reduce(fx, v, None, ret, &|fx, _ty, a, b| fx.bcx.ins().bxor(a, b));
// Min/max reductions: integers use a compare-and-select (signed or unsigned
// by lane type); floats delegate to the `crate::num` min/max helpers.
588 sym::simd_reduce_min => {
589 intrinsic_args!(fx, args => (v); intrinsic);
591 if !v.layout().ty.is_simd() {
592 report_simd_type_validation_error(fx, intrinsic, span, v.layout().ty);
596 simd_reduce(fx, v, None, ret, &|fx, ty, a, b| {
597 let lt = match ty.kind() {
598 ty::Int(_) => fx.bcx.ins().icmp(IntCC::SignedLessThan, a, b),
599 ty::Uint(_) => fx.bcx.ins().icmp(IntCC::UnsignedLessThan, a, b),
600 ty::Float(_) => return crate::num::codegen_float_min(fx, a, b),
603 fx.bcx.ins().select(lt, a, b)
607 sym::simd_reduce_max => {
608 intrinsic_args!(fx, args => (v); intrinsic);
610 if !v.layout().ty.is_simd() {
611 report_simd_type_validation_error(fx, intrinsic, span, v.layout().ty);
615 simd_reduce(fx, v, None, ret, &|fx, ty, a, b| {
616 let gt = match ty.kind() {
617 ty::Int(_) => fx.bcx.ins().icmp(IntCC::SignedGreaterThan, a, b),
618 ty::Uint(_) => fx.bcx.ins().icmp(IntCC::UnsignedGreaterThan, a, b),
619 ty::Float(_) => return crate::num::codegen_float_max(fx, a, b),
622 fx.bcx.ins().select(gt, a, b)
// `simd_select`: per-lane masked select. The `icmp_imm Equal 0` inverts the
// mask lane, so `select` takes `b_lane` first: a zero mask lane picks `b`,
// any non-zero mask lane picks `a`.
626 sym::simd_select => {
627 intrinsic_args!(fx, args => (m, a, b); intrinsic);
629 if !m.layout().ty.is_simd() {
630 report_simd_type_validation_error(fx, intrinsic, span, m.layout().ty);
633 if !a.layout().ty.is_simd() {
634 report_simd_type_validation_error(fx, intrinsic, span, a.layout().ty);
637 assert_eq!(a.layout(), b.layout());
639 let (lane_count, lane_ty) = a.layout().ty.simd_size_and_type(fx.tcx);
640 let lane_layout = fx.layout_of(lane_ty);
642 for lane in 0..lane_count {
643 let m_lane = m.value_lane(fx, lane).load_scalar(fx);
644 let a_lane = a.value_lane(fx, lane).load_scalar(fx);
645 let b_lane = b.value_lane(fx, lane).load_scalar(fx);
647 let m_lane = fx.bcx.ins().icmp_imm(IntCC::Equal, m_lane, 0);
649 CValue::by_val(fx.bcx.ins().select(m_lane, b_lane, a_lane), lane_layout);
651 ret.place_lane(fx, lane).write_cvalue(fx, res_lane);
// `simd_select_bitmask`: like `simd_select`, but the mask is a scalar whose
// bit `i` (extracted by shift-and-mask below) selects lane `i`.
655 sym::simd_select_bitmask => {
656 intrinsic_args!(fx, args => (m, a, b); intrinsic);
658 if !a.layout().ty.is_simd() {
659 report_simd_type_validation_error(fx, intrinsic, span, a.layout().ty);
662 assert_eq!(a.layout(), b.layout());
664 let (lane_count, lane_ty) = a.layout().ty.simd_size_and_type(fx.tcx);
665 let lane_layout = fx.layout_of(lane_ty);
667 let m = m.load_scalar(fx);
669 for lane in 0..lane_count {
670 let m_lane = fx.bcx.ins().ushr_imm(m, u64::from(lane) as i64);
671 let m_lane = fx.bcx.ins().band_imm(m_lane, 1);
672 let a_lane = a.value_lane(fx, lane).load_scalar(fx);
673 let b_lane = b.value_lane(fx, lane).load_scalar(fx);
675 let m_lane = fx.bcx.ins().icmp_imm(IntCC::Equal, m_lane, 0);
677 CValue::by_val(fx.bcx.ins().select(m_lane, b_lane, a_lane), lane_layout);
679 ret.place_lane(fx, lane).write_cvalue(fx, res_lane);
// `simd_bitmask`: pack the sign bit of each integer lane into a scalar
// bitmask (or a `[u8; N]` for wide vectors) — see the original comments
// below for the exact result-type and bit-order rules.
683 sym::simd_bitmask => {
684 intrinsic_args!(fx, args => (a); intrinsic);
686 let (lane_count, lane_ty) = a.layout().ty.simd_size_and_type(fx.tcx);
687 let lane_clif_ty = fx.clif_type(lane_ty).unwrap();
689 // The `fn simd_bitmask(vector) -> unsigned integer` intrinsic takes a
690 // vector mask and returns the most significant bit (MSB) of each lane in the form
692 // * an unsigned integer
693 // * an array of `u8`
694 // If the vector has less than 8 lanes, a u8 is returned with zeroed trailing bits.
696 // The bit order of the result depends on the byte endianness, LSB-first for little
697 // endian and MSB-first for big endian.
698 let expected_int_bits = lane_count.max(8);
699 let expected_bytes = expected_int_bits / 8 + ((expected_int_bits % 8 > 0) as u64);
701 match lane_ty.kind() {
702 ty::Int(_) | ty::Uint(_) => {}
704 fx.tcx.sess.span_fatal(
707 "invalid monomorphization of `simd_bitmask` intrinsic: \
708 vector argument `{}`'s element type `{}`, expected integer element \
718 Type::int_with_byte_size(u16::try_from(expected_bytes).unwrap()).unwrap();
719 let mut res = fx.bcx.ins().iconst(res_type, 0);
// Iterate lanes in an endian-dependent order so that, combined with the
// shift-left accumulation below, the bit order matches the contract above.
721 let lanes = match fx.tcx.sess.target.endian {
722 Endian::Big => Box::new(0..lane_count) as Box<dyn Iterator<Item = u64>>,
723 Endian::Little => Box::new((0..lane_count).rev()) as Box<dyn Iterator<Item = u64>>,
726 let a_lane = a.value_lane(fx, lane).load_scalar(fx);
728 // extract sign bit of an int
729 let a_lane_sign = fx.bcx.ins().ushr_imm(a_lane, i64::from(lane_clif_ty.bits() - 1));
731 // shift sign bit into result
732 let a_lane_sign = clif_intcast(fx, a_lane_sign, res_type, false);
733 res = fx.bcx.ins().ishl_imm(res, 1);
734 res = fx.bcx.ins().bor(res, a_lane_sign);
// Validate the declared return type: either `u{expected_int_bits}` or
// `[u8; expected_bytes]`; anything else is a fatal monomorphization error.
737 match ret.layout().ty.kind() {
738 ty::Uint(i) if i.bit_width() == Some(expected_int_bits) => {}
740 if matches!(elem.kind(), ty::Uint(ty::UintTy::U8))
741 && len.try_eval_usize(fx.tcx, ty::ParamEnv::reveal_all())
742 == Some(expected_bytes) => {}
744 fx.tcx.sess.span_fatal(
747 "invalid monomorphization of `simd_bitmask` intrinsic: \
748 cannot return `{}`, expected `u{}` or `[u8; {}]`",
757 let res = CValue::by_val(res, ret.layout());
758 ret.write_cvalue(fx, res);
// `simd_saturating_add` / `simd_saturating_sub`: lane-wise saturating
// integer arithmetic via the shared helper in `crate::num`.
761 sym::simd_saturating_add | sym::simd_saturating_sub => {
762 intrinsic_args!(fx, args => (x, y); intrinsic);
764 let bin_op = match intrinsic {
765 sym::simd_saturating_add => BinOp::Add,
766 sym::simd_saturating_sub => BinOp::Sub,
770 // FIXME use vector instructions when possible
771 simd_pair_for_each_lane_typed(fx, x, y, ret, &|fx, x_lane, y_lane| {
772 crate::num::codegen_saturating_int_binop(fx, bin_op, x_lane, y_lane)
// Fallthrough: intrinsic not recognized by any arm above.
780 fx.tcx.sess.span_fatal(span, &format!("Unknown SIMD intrinsic {}", intrinsic));