5 use rustc_apfloat::{Float, Round};
6 use rustc_middle::ty::layout::{HasParamEnv, IntegerExt, LayoutOf};
7 use rustc_middle::{mir, mir::BinOp, ty, ty::FloatTy};
8 use rustc_target::abi::{Align, Endian, HasDataLayout, Integer, Size};
11 use helpers::check_arg_count;
14 MirOp(mir::BinOp, bool),
// Wire the extension trait onto Miri's evaluation context. The impl body is intentionally
// empty: every method is provided as a default implementation on the trait below.
19 impl<'mir, 'tcx: 'mir> EvalContextExt<'mir, 'tcx> for crate::MiriEvalContext<'mir, 'tcx> {}
20 pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx> {
23 instance: ty::Instance<'tcx>,
24 args: &[OpTy<'tcx, Tag>],
25 ret: Option<(&PlaceTy<'tcx, Tag>, mir::BasicBlock)>,
26 _unwind: StackPopUnwind,
27 ) -> InterpResult<'tcx> {
28 let this = self.eval_context_mut();
30 if this.emulate_intrinsic(instance, args, ret)? {
34 // All supported intrinsics have a return place.
35 let intrinsic_name = this.tcx.item_name(instance.def_id());
36 let intrinsic_name = intrinsic_name.as_str();
37 let (dest, ret) = match ret {
38 None => throw_unsup_format!("unimplemented (diverging) intrinsic: {}", intrinsic_name),
42 // Then handle terminating intrinsics.
43 match intrinsic_name {
44 // Miri overwriting CTFE intrinsics.
45 "ptr_guaranteed_eq" => {
46 let [left, right] = check_arg_count(args)?;
47 let left = this.read_immediate(left)?;
48 let right = this.read_immediate(right)?;
49 this.binop_ignore_overflow(mir::BinOp::Eq, &left, &right, dest)?;
51 "ptr_guaranteed_ne" => {
52 let [left, right] = check_arg_count(args)?;
53 let left = this.read_immediate(left)?;
54 let right = this.read_immediate(right)?;
55 this.binop_ignore_overflow(mir::BinOp::Ne, &left, &right, dest)?;
58 // For now, for compatibility with the run-time implementation of this, we just return null.
59 // See <https://github.com/rust-lang/rust/issues/93935>.
60 this.write_null(dest)?;
62 "const_deallocate" => {
66 // Raw memory accesses
68 let [place] = check_arg_count(args)?;
69 let place = this.deref_operand(place)?;
70 this.copy_op(&place.into(), dest)?;
73 let [place, dest] = check_arg_count(args)?;
74 let place = this.deref_operand(place)?;
75 this.copy_op(dest, &place.into())?;
78 "write_bytes" | "volatile_set_memory" => {
79 let [ptr, val_byte, count] = check_arg_count(args)?;
80 let ty = instance.substs.type_at(0);
81 let ty_layout = this.layout_of(ty)?;
82 let val_byte = this.read_scalar(val_byte)?.to_u8()?;
83 let ptr = this.read_pointer(ptr)?;
84 let count = this.read_scalar(count)?.to_machine_usize(this)?;
85 // `checked_mul` enforces a too-small bound (the correct one would probably be machine_isize_max),
86 // but no actual allocation can be big enough for the difference to be noticeable.
87 let byte_count = ty_layout.size.checked_mul(count, this).ok_or_else(|| {
88 err_ub_format!("overflow computing total size of `{}`", intrinsic_name)
92 iter::repeat(val_byte).take(byte_count.bytes() as usize),
96 // Floating-point operations
98 let [f] = check_arg_count(args)?;
99 let f = this.read_scalar(f)?.to_f32()?;
100 // Can be implemented in soft-floats.
101 this.write_scalar(Scalar::from_f32(f.abs()), dest)?;
104 let [f] = check_arg_count(args)?;
105 let f = this.read_scalar(f)?.to_f64()?;
106 // Can be implemented in soft-floats.
107 this.write_scalar(Scalar::from_f64(f.abs()), dest)?;
123 let [f] = check_arg_count(args)?;
124 // FIXME: Using host floats.
125 let f = f32::from_bits(this.read_scalar(f)?.to_u32()?);
126 let f = match intrinsic_name {
129 "sqrtf32" => f.sqrt(),
131 "exp2f32" => f.exp2(),
133 "log10f32" => f.log10(),
134 "log2f32" => f.log2(),
135 "floorf32" => f.floor(),
136 "ceilf32" => f.ceil(),
137 "truncf32" => f.trunc(),
138 "roundf32" => f.round(),
141 this.write_scalar(Scalar::from_u32(f.to_bits()), dest)?;
158 let [f] = check_arg_count(args)?;
159 // FIXME: Using host floats.
160 let f = f64::from_bits(this.read_scalar(f)?.to_u64()?);
161 let f = match intrinsic_name {
164 "sqrtf64" => f.sqrt(),
166 "exp2f64" => f.exp2(),
168 "log10f64" => f.log10(),
169 "log2f64" => f.log2(),
170 "floorf64" => f.floor(),
171 "ceilf64" => f.ceil(),
172 "truncf64" => f.trunc(),
173 "roundf64" => f.round(),
176 this.write_scalar(Scalar::from_u64(f.to_bits()), dest)?;
186 let [a, b] = check_arg_count(args)?;
187 let a = this.read_immediate(a)?;
188 let b = this.read_immediate(b)?;
189 let op = match intrinsic_name {
190 "fadd_fast" => mir::BinOp::Add,
191 "fsub_fast" => mir::BinOp::Sub,
192 "fmul_fast" => mir::BinOp::Mul,
193 "fdiv_fast" => mir::BinOp::Div,
194 "frem_fast" => mir::BinOp::Rem,
197 let float_finite = |x: ImmTy<'tcx, _>| -> InterpResult<'tcx, bool> {
198 Ok(match x.layout.ty.kind() {
199 ty::Float(FloatTy::F32) => x.to_scalar()?.to_f32()?.is_finite(),
200 ty::Float(FloatTy::F64) => x.to_scalar()?.to_f64()?.is_finite(),
202 "`{}` called with non-float input type {:?}",
208 match (float_finite(a)?, float_finite(b)?) {
209 (false, false) => throw_ub_format!(
210 "`{}` intrinsic called with non-finite value as both parameters",
213 (false, _) => throw_ub_format!(
214 "`{}` intrinsic called with non-finite value as first parameter",
217 (_, false) => throw_ub_format!(
218 "`{}` intrinsic called with non-finite value as second parameter",
223 this.binop_ignore_overflow(op, &a, &b, dest)?;
231 let [a, b] = check_arg_count(args)?;
232 let a = this.read_scalar(a)?.to_f32()?;
233 let b = this.read_scalar(b)?.to_f32()?;
234 let res = match intrinsic_name {
235 "minnumf32" => a.min(b),
236 "maxnumf32" => a.max(b),
237 "copysignf32" => a.copy_sign(b),
240 this.write_scalar(Scalar::from_f32(res), dest)?;
248 let [a, b] = check_arg_count(args)?;
249 let a = this.read_scalar(a)?.to_f64()?;
250 let b = this.read_scalar(b)?.to_f64()?;
251 let res = match intrinsic_name {
252 "minnumf64" => a.min(b),
253 "maxnumf64" => a.max(b),
254 "copysignf64" => a.copy_sign(b),
257 this.write_scalar(Scalar::from_f64(res), dest)?;
261 let [f, f2] = check_arg_count(args)?;
262 // FIXME: Using host floats.
263 let f = f32::from_bits(this.read_scalar(f)?.to_u32()?);
264 let f2 = f32::from_bits(this.read_scalar(f2)?.to_u32()?);
265 this.write_scalar(Scalar::from_u32(f.powf(f2).to_bits()), dest)?;
269 let [f, f2] = check_arg_count(args)?;
270 // FIXME: Using host floats.
271 let f = f64::from_bits(this.read_scalar(f)?.to_u64()?);
272 let f2 = f64::from_bits(this.read_scalar(f2)?.to_u64()?);
273 this.write_scalar(Scalar::from_u64(f.powf(f2).to_bits()), dest)?;
277 let [a, b, c] = check_arg_count(args)?;
278 let a = this.read_scalar(a)?.to_f32()?;
279 let b = this.read_scalar(b)?.to_f32()?;
280 let c = this.read_scalar(c)?.to_f32()?;
281 let res = a.mul_add(b, c).value;
282 this.write_scalar(Scalar::from_f32(res), dest)?;
286 let [a, b, c] = check_arg_count(args)?;
287 let a = this.read_scalar(a)?.to_f64()?;
288 let b = this.read_scalar(b)?.to_f64()?;
289 let c = this.read_scalar(c)?.to_f64()?;
290 let res = a.mul_add(b, c).value;
291 this.write_scalar(Scalar::from_f64(res), dest)?;
295 let [f, i] = check_arg_count(args)?;
296 // FIXME: Using host floats.
297 let f = f32::from_bits(this.read_scalar(f)?.to_u32()?);
298 let i = this.read_scalar(i)?.to_i32()?;
299 this.write_scalar(Scalar::from_u32(f.powi(i).to_bits()), dest)?;
303 let [f, i] = check_arg_count(args)?;
304 // FIXME: Using host floats.
305 let f = f64::from_bits(this.read_scalar(f)?.to_u64()?);
306 let i = this.read_scalar(i)?.to_i32()?;
307 this.write_scalar(Scalar::from_u64(f.powi(i).to_bits()), dest)?;
310 "float_to_int_unchecked" => {
311 let [val] = check_arg_count(args)?;
312 let val = this.read_immediate(val)?;
314 let res = match val.layout.ty.kind() {
315 ty::Float(FloatTy::F32) =>
316 this.float_to_int_unchecked(val.to_scalar()?.to_f32()?, dest.layout.ty)?,
317 ty::Float(FloatTy::F64) =>
318 this.float_to_int_unchecked(val.to_scalar()?.to_f64()?, dest.layout.ty)?,
321 "`float_to_int_unchecked` called with non-float input type {:?}",
326 this.write_scalar(res, dest)?;
338 let [op] = check_arg_count(args)?;
339 let (op, op_len) = this.operand_to_simd(op)?;
340 let (dest, dest_len) = this.place_to_simd(dest)?;
342 assert_eq!(dest_len, op_len);
344 #[derive(Copy, Clone)]
352 #[derive(Copy, Clone)]
358 let which = match intrinsic_name {
359 "simd_neg" => Op::MirOp(mir::UnOp::Neg),
360 "simd_fabs" => Op::Abs,
361 "simd_ceil" => Op::HostOp(HostFloatOp::Ceil),
362 "simd_floor" => Op::HostOp(HostFloatOp::Floor),
363 "simd_round" => Op::HostOp(HostFloatOp::Round),
364 "simd_trunc" => Op::HostOp(HostFloatOp::Trunc),
365 "simd_fsqrt" => Op::HostOp(HostFloatOp::Sqrt),
369 for i in 0..dest_len {
370 let op = this.read_immediate(&this.mplace_index(&op, i)?.into())?;
371 let dest = this.mplace_index(&dest, i)?;
372 let val = match which {
373 Op::MirOp(mir_op) => this.unary_op(mir_op, &op)?.to_scalar()?,
375 // Works for f32 and f64.
376 let ty::Float(float_ty) = op.layout.ty.kind() else {
377 bug!("{} operand is not a float", intrinsic_name)
379 let op = op.to_scalar()?;
381 FloatTy::F32 => Scalar::from_f32(op.to_f32()?.abs()),
382 FloatTy::F64 => Scalar::from_f64(op.to_f64()?.abs()),
385 Op::HostOp(host_op) => {
386 let ty::Float(float_ty) = op.layout.ty.kind() else {
387 bug!("{} operand is not a float", intrinsic_name)
389 // FIXME using host floats
392 let f = f32::from_bits(op.to_scalar()?.to_u32()?);
393 let res = match host_op {
394 HostFloatOp::Ceil => f.ceil(),
395 HostFloatOp::Floor => f.floor(),
396 HostFloatOp::Round => f.round(),
397 HostFloatOp::Trunc => f.trunc(),
398 HostFloatOp::Sqrt => f.sqrt(),
400 Scalar::from_u32(res.to_bits())
403 let f = f64::from_bits(op.to_scalar()?.to_u64()?);
404 let res = match host_op {
405 HostFloatOp::Ceil => f.ceil(),
406 HostFloatOp::Floor => f.floor(),
407 HostFloatOp::Round => f.round(),
408 HostFloatOp::Trunc => f.trunc(),
409 HostFloatOp::Sqrt => f.sqrt(),
411 Scalar::from_u64(res.to_bits())
417 this.write_scalar(val, &dest.into())?;
439 | "simd_saturating_add"
440 | "simd_saturating_sub"
441 | "simd_arith_offset" => {
444 let [left, right] = check_arg_count(args)?;
445 let (left, left_len) = this.operand_to_simd(left)?;
446 let (right, right_len) = this.operand_to_simd(right)?;
447 let (dest, dest_len) = this.place_to_simd(dest)?;
449 assert_eq!(dest_len, left_len);
450 assert_eq!(dest_len, right_len);
459 let which = match intrinsic_name {
460 "simd_add" => Op::MirOp(BinOp::Add),
461 "simd_sub" => Op::MirOp(BinOp::Sub),
462 "simd_mul" => Op::MirOp(BinOp::Mul),
463 "simd_div" => Op::MirOp(BinOp::Div),
464 "simd_rem" => Op::MirOp(BinOp::Rem),
465 "simd_shl" => Op::MirOp(BinOp::Shl),
466 "simd_shr" => Op::MirOp(BinOp::Shr),
467 "simd_and" => Op::MirOp(BinOp::BitAnd),
468 "simd_or" => Op::MirOp(BinOp::BitOr),
469 "simd_xor" => Op::MirOp(BinOp::BitXor),
470 "simd_eq" => Op::MirOp(BinOp::Eq),
471 "simd_ne" => Op::MirOp(BinOp::Ne),
472 "simd_lt" => Op::MirOp(BinOp::Lt),
473 "simd_le" => Op::MirOp(BinOp::Le),
474 "simd_gt" => Op::MirOp(BinOp::Gt),
475 "simd_ge" => Op::MirOp(BinOp::Ge),
476 "simd_fmax" => Op::FMax,
477 "simd_fmin" => Op::FMin,
478 "simd_saturating_add" => Op::SaturatingOp(BinOp::Add),
479 "simd_saturating_sub" => Op::SaturatingOp(BinOp::Sub),
480 "simd_arith_offset" => Op::WrappingOffset,
484 for i in 0..dest_len {
485 let left = this.read_immediate(&this.mplace_index(&left, i)?.into())?;
486 let right = this.read_immediate(&this.mplace_index(&right, i)?.into())?;
487 let dest = this.mplace_index(&dest, i)?;
488 let val = match which {
489 Op::MirOp(mir_op) => {
490 let (val, overflowed, ty) = this.overflowing_binary_op(mir_op, &left, &right)?;
491 if matches!(mir_op, BinOp::Shl | BinOp::Shr) {
492 // Shifts have extra UB as SIMD operations that the MIR binop does not have.
493 // See <https://github.com/rust-lang/rust/issues/91237>.
495 let r_val = right.to_scalar()?.to_bits(right.layout.size)?;
496 throw_ub_format!("overflowing shift by {} in `{}` in SIMD lane {}", r_val, intrinsic_name, i);
499 if matches!(mir_op, BinOp::Eq | BinOp::Ne | BinOp::Lt | BinOp::Le | BinOp::Gt | BinOp::Ge) {
500 // Special handling for boolean-returning operations
501 assert_eq!(ty, this.tcx.types.bool);
502 let val = val.to_bool().unwrap();
503 bool_to_simd_element(val, dest.layout.size)
505 assert_ne!(ty, this.tcx.types.bool);
506 assert_eq!(ty, dest.layout.ty);
510 Op::SaturatingOp(mir_op) => {
511 this.saturating_arith(mir_op, &left, &right)?
513 Op::WrappingOffset => {
514 let ptr = this.scalar_to_ptr(left.to_scalar()?)?;
515 let offset_count = right.to_scalar()?.to_machine_isize(this)?;
516 let pointee_ty = left.layout.ty.builtin_deref(true).unwrap().ty;
518 let pointee_size = i64::try_from(this.layout_of(pointee_ty)?.size.bytes()).unwrap();
519 let offset_bytes = offset_count.wrapping_mul(pointee_size);
520 let offset_ptr = ptr.wrapping_signed_offset(offset_bytes, this);
521 Scalar::from_maybe_pointer(offset_ptr, this)
524 fmax_op(&left, &right)?
527 fmin_op(&left, &right)?
530 this.write_scalar(val, &dest.into())?;
534 let [a, b, c] = check_arg_count(args)?;
535 let (a, a_len) = this.operand_to_simd(a)?;
536 let (b, b_len) = this.operand_to_simd(b)?;
537 let (c, c_len) = this.operand_to_simd(c)?;
538 let (dest, dest_len) = this.place_to_simd(dest)?;
540 assert_eq!(dest_len, a_len);
541 assert_eq!(dest_len, b_len);
542 assert_eq!(dest_len, c_len);
544 for i in 0..dest_len {
545 let a = this.read_immediate(&this.mplace_index(&a, i)?.into())?.to_scalar()?;
546 let b = this.read_immediate(&this.mplace_index(&b, i)?.into())?.to_scalar()?;
547 let c = this.read_immediate(&this.mplace_index(&c, i)?.into())?.to_scalar()?;
548 let dest = this.mplace_index(&dest, i)?;
550 // Works for f32 and f64.
551 let ty::Float(float_ty) = dest.layout.ty.kind() else {
552 bug!("{} operand is not a float", intrinsic_name)
554 let val = match float_ty {
556 Scalar::from_f32(a.to_f32()?.mul_add(b.to_f32()?, c.to_f32()?).value),
558 Scalar::from_f64(a.to_f64()?.mul_add(b.to_f64()?, c.to_f64()?).value),
560 this.write_scalar(val, &dest.into())?;
570 | "simd_reduce_min" => {
573 let [op] = check_arg_count(args)?;
574 let (op, op_len) = this.operand_to_simd(op)?;
577 |b| ImmTy::from_scalar(Scalar::from_bool(b), this.machine.layouts.bool);
585 let which = match intrinsic_name {
586 "simd_reduce_and" => Op::MirOp(BinOp::BitAnd),
587 "simd_reduce_or" => Op::MirOp(BinOp::BitOr),
588 "simd_reduce_xor" => Op::MirOp(BinOp::BitXor),
589 "simd_reduce_any" => Op::MirOpBool(BinOp::BitOr),
590 "simd_reduce_all" => Op::MirOpBool(BinOp::BitAnd),
591 "simd_reduce_max" => Op::Max,
592 "simd_reduce_min" => Op::Min,
596 // Initialize with first lane, then proceed with the rest.
597 let mut res = this.read_immediate(&this.mplace_index(&op, 0)?.into())?;
598 if matches!(which, Op::MirOpBool(_)) {
599 // Convert to `bool` scalar.
600 res = imm_from_bool(simd_element_to_bool(res)?);
603 let op = this.read_immediate(&this.mplace_index(&op, i)?.into())?;
605 Op::MirOp(mir_op) => {
606 this.binary_op(mir_op, &res, &op)?
608 Op::MirOpBool(mir_op) => {
609 let op = imm_from_bool(simd_element_to_bool(op)?);
610 this.binary_op(mir_op, &res, &op)?
613 if matches!(res.layout.ty.kind(), ty::Float(_)) {
614 ImmTy::from_scalar(fmax_op(&res, &op)?, res.layout)
616 // Just boring integers, so no NaNs to worry about
617 if this.binary_op(BinOp::Ge, &res, &op)?.to_scalar()?.to_bool()? {
625 if matches!(res.layout.ty.kind(), ty::Float(_)) {
626 ImmTy::from_scalar(fmin_op(&res, &op)?, res.layout)
629 // Just boring integers, so no NaNs to worry about
629 if this.binary_op(BinOp::Le, &res, &op)?.to_scalar()?.to_bool()? {
638 this.write_immediate(*res, dest)?;
641 | "simd_reduce_add_ordered"
642 | "simd_reduce_mul_ordered" => {
645 let [op, init] = check_arg_count(args)?;
646 let (op, op_len) = this.operand_to_simd(op)?;
647 let init = this.read_immediate(init)?;
649 let mir_op = match intrinsic_name {
650 "simd_reduce_add_ordered" => BinOp::Add,
651 "simd_reduce_mul_ordered" => BinOp::Mul,
657 let op = this.read_immediate(&this.mplace_index(&op, i)?.into())?;
658 res = this.binary_op(mir_op, &res, &op)?;
660 this.write_immediate(*res, dest)?;
663 let [mask, yes, no] = check_arg_count(args)?;
664 let (mask, mask_len) = this.operand_to_simd(mask)?;
665 let (yes, yes_len) = this.operand_to_simd(yes)?;
666 let (no, no_len) = this.operand_to_simd(no)?;
667 let (dest, dest_len) = this.place_to_simd(dest)?;
669 assert_eq!(dest_len, mask_len);
670 assert_eq!(dest_len, yes_len);
671 assert_eq!(dest_len, no_len);
673 for i in 0..dest_len {
674 let mask = this.read_immediate(&this.mplace_index(&mask, i)?.into())?;
675 let yes = this.read_immediate(&this.mplace_index(&yes, i)?.into())?;
676 let no = this.read_immediate(&this.mplace_index(&no, i)?.into())?;
677 let dest = this.mplace_index(&dest, i)?;
679 let val = if simd_element_to_bool(mask)? { yes } else { no };
680 this.write_immediate(*val, &dest.into())?;
683 "simd_select_bitmask" => {
684 let [mask, yes, no] = check_arg_count(args)?;
685 let (yes, yes_len) = this.operand_to_simd(yes)?;
686 let (no, no_len) = this.operand_to_simd(no)?;
687 let (dest, dest_len) = this.place_to_simd(dest)?;
688 let bitmask_len = dest_len.max(8);
690 assert!(mask.layout.ty.is_integral());
691 assert!(bitmask_len <= 64);
692 assert_eq!(bitmask_len, mask.layout.size.bits());
693 assert_eq!(dest_len, yes_len);
694 assert_eq!(dest_len, no_len);
699 .to_bits(mask.layout.size)?
702 for i in 0..dest_len {
704 mask & (1 << simd_bitmask_index(i, dest_len, this.data_layout().endian));
705 let yes = this.read_immediate(&this.mplace_index(&yes, i)?.into())?;
706 let no = this.read_immediate(&this.mplace_index(&no, i)?.into())?;
707 let dest = this.mplace_index(&dest, i)?;
709 let val = if mask != 0 { yes } else { no };
710 this.write_immediate(*val, &dest.into())?;
712 for i in dest_len..bitmask_len {
713 // If the mask is "padded", ensure that padding is all-zero.
714 let mask = mask & (1 << i);
717 "a SIMD bitmask less than 8 bits long must be filled with 0s for the remaining bits"
723 "simd_cast" | "simd_as" => {
724 let [op] = check_arg_count(args)?;
725 let (op, op_len) = this.operand_to_simd(op)?;
726 let (dest, dest_len) = this.place_to_simd(dest)?;
728 assert_eq!(dest_len, op_len);
730 let safe_cast = intrinsic_name == "simd_as";
732 for i in 0..dest_len {
733 let op = this.read_immediate(&this.mplace_index(&op, i)?.into())?;
734 let dest = this.mplace_index(&dest, i)?;
736 let val = match (op.layout.ty.kind(), dest.layout.ty.kind()) {
737 // Int-to-(int|float): always safe
738 (ty::Int(_) | ty::Uint(_), ty::Int(_) | ty::Uint(_) | ty::Float(_)) =>
739 this.misc_cast(&op, dest.layout.ty)?,
740 // Float-to-float: always safe
741 (ty::Float(_), ty::Float(_)) =>
742 this.misc_cast(&op, dest.layout.ty)?,
743 // Float-to-int in safe mode
744 (ty::Float(_), ty::Int(_) | ty::Uint(_)) if safe_cast =>
745 this.misc_cast(&op, dest.layout.ty)?,
746 // Float-to-int in unchecked mode
747 (ty::Float(FloatTy::F32), ty::Int(_) | ty::Uint(_)) if !safe_cast =>
748 this.float_to_int_unchecked(op.to_scalar()?.to_f32()?, dest.layout.ty)?.into(),
749 (ty::Float(FloatTy::F64), ty::Int(_) | ty::Uint(_)) if !safe_cast =>
750 this.float_to_int_unchecked(op.to_scalar()?.to_f64()?, dest.layout.ty)?.into(),
753 "Unsupported SIMD cast from element type {} to {}",
758 this.write_immediate(val, &dest.into())?;
762 let [left, right, index] = check_arg_count(args)?;
763 let (left, left_len) = this.operand_to_simd(left)?;
764 let (right, right_len) = this.operand_to_simd(right)?;
765 let (dest, dest_len) = this.place_to_simd(dest)?;
767 // `index` is an array, not a SIMD type
768 let ty::Array(_, index_len) = index.layout.ty.kind() else {
769 bug!("simd_shuffle index argument has non-array type {}", index.layout.ty)
771 let index_len = index_len.eval_usize(*this.tcx, this.param_env());
773 assert_eq!(left_len, right_len);
774 assert_eq!(index_len, dest_len);
776 for i in 0..dest_len {
777 let src_index: u64 = this
778 .read_immediate(&this.operand_index(index, i)?)?
782 let dest = this.mplace_index(&dest, i)?;
784 let val = if src_index < left_len {
785 this.read_immediate(&this.mplace_index(&left, src_index)?.into())?
786 } else if src_index < left_len.checked_add(right_len).unwrap() {
788 &this.mplace_index(&right, src_index - left_len)?.into(),
792 "simd_shuffle index {} is out of bounds for 2 vectors of size {}",
797 this.write_immediate(*val, &dest.into())?;
801 let [passthru, ptrs, mask] = check_arg_count(args)?;
802 let (passthru, passthru_len) = this.operand_to_simd(passthru)?;
803 let (ptrs, ptrs_len) = this.operand_to_simd(ptrs)?;
804 let (mask, mask_len) = this.operand_to_simd(mask)?;
805 let (dest, dest_len) = this.place_to_simd(dest)?;
807 assert_eq!(dest_len, passthru_len);
808 assert_eq!(dest_len, ptrs_len);
809 assert_eq!(dest_len, mask_len);
811 for i in 0..dest_len {
812 let passthru = this.read_immediate(&this.mplace_index(&passthru, i)?.into())?;
813 let ptr = this.read_immediate(&this.mplace_index(&ptrs, i)?.into())?;
814 let mask = this.read_immediate(&this.mplace_index(&mask, i)?.into())?;
815 let dest = this.mplace_index(&dest, i)?;
817 let val = if simd_element_to_bool(mask)? {
818 let place = this.deref_operand(&ptr.into())?;
819 this.read_immediate(&place.into())?
823 this.write_immediate(*val, &dest.into())?;
827 let [value, ptrs, mask] = check_arg_count(args)?;
828 let (value, value_len) = this.operand_to_simd(value)?;
829 let (ptrs, ptrs_len) = this.operand_to_simd(ptrs)?;
830 let (mask, mask_len) = this.operand_to_simd(mask)?;
832 assert_eq!(ptrs_len, value_len);
833 assert_eq!(ptrs_len, mask_len);
835 for i in 0..ptrs_len {
836 let value = this.read_immediate(&this.mplace_index(&value, i)?.into())?;
837 let ptr = this.read_immediate(&this.mplace_index(&ptrs, i)?.into())?;
838 let mask = this.read_immediate(&this.mplace_index(&mask, i)?.into())?;
840 if simd_element_to_bool(mask)? {
841 let place = this.deref_operand(&ptr.into())?;
842 this.write_immediate(*value, &place.into())?;
847 let [op] = check_arg_count(args)?;
848 let (op, op_len) = this.operand_to_simd(op)?;
849 let bitmask_len = op_len.max(8);
851 assert!(dest.layout.ty.is_integral());
852 assert!(bitmask_len <= 64);
853 assert_eq!(bitmask_len, dest.layout.size.bits());
857 let op = this.read_immediate(&this.mplace_index(&op, i)?.into())?;
858 if simd_element_to_bool(op)? {
859 res |= 1 << simd_bitmask_index(i, op_len, this.data_layout().endian);
862 this.write_int(res, dest)?;
866 "atomic_load" => this.atomic_load(args, dest, AtomicReadOp::SeqCst)?,
867 "atomic_load_relaxed" => this.atomic_load(args, dest, AtomicReadOp::Relaxed)?,
868 "atomic_load_acq" => this.atomic_load(args, dest, AtomicReadOp::Acquire)?,
870 "atomic_store" => this.atomic_store(args, AtomicWriteOp::SeqCst)?,
871 "atomic_store_relaxed" => this.atomic_store(args, AtomicWriteOp::Relaxed)?,
872 "atomic_store_rel" => this.atomic_store(args, AtomicWriteOp::Release)?,
874 "atomic_fence_acq" => this.atomic_fence(args, AtomicFenceOp::Acquire)?,
875 "atomic_fence_rel" => this.atomic_fence(args, AtomicFenceOp::Release)?,
876 "atomic_fence_acqrel" => this.atomic_fence(args, AtomicFenceOp::AcqRel)?,
877 "atomic_fence" => this.atomic_fence(args, AtomicFenceOp::SeqCst)?,
879 "atomic_singlethreadfence_acq" => this.compiler_fence(args, AtomicFenceOp::Acquire)?,
880 "atomic_singlethreadfence_rel" => this.compiler_fence(args, AtomicFenceOp::Release)?,
881 "atomic_singlethreadfence_acqrel" =>
882 this.compiler_fence(args, AtomicFenceOp::AcqRel)?,
883 "atomic_singlethreadfence" => this.compiler_fence(args, AtomicFenceOp::SeqCst)?,
885 "atomic_xchg" => this.atomic_exchange(args, dest, AtomicRwOp::SeqCst)?,
886 "atomic_xchg_acq" => this.atomic_exchange(args, dest, AtomicRwOp::Acquire)?,
887 "atomic_xchg_rel" => this.atomic_exchange(args, dest, AtomicRwOp::Release)?,
888 "atomic_xchg_acqrel" => this.atomic_exchange(args, dest, AtomicRwOp::AcqRel)?,
889 "atomic_xchg_relaxed" => this.atomic_exchange(args, dest, AtomicRwOp::Relaxed)?,
893 this.atomic_compare_exchange(args, dest, AtomicRwOp::SeqCst, AtomicReadOp::SeqCst)?,
895 "atomic_cxchg_acq" =>
896 this.atomic_compare_exchange(args, dest, AtomicRwOp::Acquire, AtomicReadOp::Acquire)?,
898 "atomic_cxchg_rel" =>
899 this.atomic_compare_exchange(args, dest, AtomicRwOp::Release, AtomicReadOp::Relaxed)?,
901 "atomic_cxchg_acqrel" =>
902 this.atomic_compare_exchange(args, dest, AtomicRwOp::AcqRel, AtomicReadOp::Acquire)?,
904 "atomic_cxchg_relaxed" =>
905 this.atomic_compare_exchange(args, dest, AtomicRwOp::Relaxed, AtomicReadOp::Relaxed)?,
907 "atomic_cxchg_acq_failrelaxed" =>
908 this.atomic_compare_exchange(args, dest, AtomicRwOp::Acquire, AtomicReadOp::Relaxed)?,
910 "atomic_cxchg_acqrel_failrelaxed" =>
911 this.atomic_compare_exchange(args, dest, AtomicRwOp::AcqRel, AtomicReadOp::Relaxed)?,
913 "atomic_cxchg_failrelaxed" =>
914 this.atomic_compare_exchange(args, dest, AtomicRwOp::SeqCst, AtomicReadOp::Relaxed)?,
916 "atomic_cxchg_failacq" =>
917 this.atomic_compare_exchange(args, dest, AtomicRwOp::SeqCst, AtomicReadOp::Acquire)?,
920 "atomic_cxchgweak" =>
921 this.atomic_compare_exchange_weak(args, dest, AtomicRwOp::SeqCst, AtomicReadOp::SeqCst)?,
923 "atomic_cxchgweak_acq" =>
924 this.atomic_compare_exchange_weak(args, dest, AtomicRwOp::Acquire, AtomicReadOp::Acquire)?,
926 "atomic_cxchgweak_rel" =>
927 this.atomic_compare_exchange_weak(args, dest, AtomicRwOp::Release, AtomicReadOp::Relaxed)?,
929 "atomic_cxchgweak_acqrel" =>
930 this.atomic_compare_exchange_weak(args, dest, AtomicRwOp::AcqRel, AtomicReadOp::Acquire)?,
932 "atomic_cxchgweak_relaxed" =>
933 this.atomic_compare_exchange_weak(args, dest, AtomicRwOp::Relaxed, AtomicReadOp::Relaxed)?,
935 "atomic_cxchgweak_acq_failrelaxed" =>
936 this.atomic_compare_exchange_weak(args, dest, AtomicRwOp::Acquire, AtomicReadOp::Relaxed)?,
938 "atomic_cxchgweak_acqrel_failrelaxed" =>
939 this.atomic_compare_exchange_weak(args, dest, AtomicRwOp::AcqRel, AtomicReadOp::Relaxed)?,
941 "atomic_cxchgweak_failrelaxed" =>
942 this.atomic_compare_exchange_weak(args, dest, AtomicRwOp::SeqCst, AtomicReadOp::Relaxed)?,
944 "atomic_cxchgweak_failacq" =>
945 this.atomic_compare_exchange_weak(args, dest, AtomicRwOp::SeqCst, AtomicReadOp::Acquire)?,
949 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitOr, false), AtomicRwOp::SeqCst)?,
952 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitOr, false), AtomicRwOp::Acquire)?,
955 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitOr, false), AtomicRwOp::Release)?,
957 "atomic_or_acqrel" =>
958 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitOr, false), AtomicRwOp::AcqRel)?,
960 "atomic_or_relaxed" =>
961 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitOr, false), AtomicRwOp::Relaxed)?,
964 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitXor, false), AtomicRwOp::SeqCst)?,
967 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitXor, false), AtomicRwOp::Acquire)?,
970 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitXor, false), AtomicRwOp::Release)?,
972 "atomic_xor_acqrel" =>
973 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitXor, false), AtomicRwOp::AcqRel)?,
975 "atomic_xor_relaxed" =>
976 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitXor, false), AtomicRwOp::Relaxed)?,
979 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitAnd, false), AtomicRwOp::SeqCst)?,
982 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitAnd, false), AtomicRwOp::Acquire)?,
985 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitAnd, false), AtomicRwOp::Release)?,
987 "atomic_and_acqrel" =>
988 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitAnd, false), AtomicRwOp::AcqRel)?,
990 "atomic_and_relaxed" =>
991 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitAnd, false), AtomicRwOp::Relaxed)?,
994 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitAnd, true), AtomicRwOp::SeqCst)?,
997 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitAnd, true), AtomicRwOp::Acquire)?,
1000 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitAnd, true), AtomicRwOp::Release)?,
1002 "atomic_nand_acqrel" =>
1003 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitAnd, true), AtomicRwOp::AcqRel)?,
1005 "atomic_nand_relaxed" =>
1006 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitAnd, true), AtomicRwOp::Relaxed)?,
1009 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::Add, false), AtomicRwOp::SeqCst)?,
1011 "atomic_xadd_acq" =>
1012 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::Add, false), AtomicRwOp::Acquire)?,
1014 "atomic_xadd_rel" =>
1015 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::Add, false), AtomicRwOp::Release)?,
1017 "atomic_xadd_acqrel" =>
1018 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::Add, false), AtomicRwOp::AcqRel)?,
1020 "atomic_xadd_relaxed" =>
1021 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::Add, false), AtomicRwOp::Relaxed)?,
1024 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::Sub, false), AtomicRwOp::SeqCst)?,
1026 "atomic_xsub_acq" =>
1027 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::Sub, false), AtomicRwOp::Acquire)?,
1029 "atomic_xsub_rel" =>
1030 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::Sub, false), AtomicRwOp::Release)?,
1032 "atomic_xsub_acqrel" =>
1033 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::Sub, false), AtomicRwOp::AcqRel)?,
1035 "atomic_xsub_relaxed" =>
1036 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::Sub, false), AtomicRwOp::Relaxed)?,
1037 "atomic_min" => this.atomic_op(args, dest, AtomicOp::Min, AtomicRwOp::SeqCst)?,
1038 "atomic_min_acq" => this.atomic_op(args, dest, AtomicOp::Min, AtomicRwOp::Acquire)?,
1039 "atomic_min_rel" => this.atomic_op(args, dest, AtomicOp::Min, AtomicRwOp::Release)?,
1040 "atomic_min_acqrel" => this.atomic_op(args, dest, AtomicOp::Min, AtomicRwOp::AcqRel)?,
1041 "atomic_min_relaxed" =>
1042 this.atomic_op(args, dest, AtomicOp::Min, AtomicRwOp::Relaxed)?,
1043 "atomic_max" => this.atomic_op(args, dest, AtomicOp::Max, AtomicRwOp::SeqCst)?,
1044 "atomic_max_acq" => this.atomic_op(args, dest, AtomicOp::Max, AtomicRwOp::Acquire)?,
1045 "atomic_max_rel" => this.atomic_op(args, dest, AtomicOp::Max, AtomicRwOp::Release)?,
1046 "atomic_max_acqrel" => this.atomic_op(args, dest, AtomicOp::Max, AtomicRwOp::AcqRel)?,
1047 "atomic_max_relaxed" =>
1048 this.atomic_op(args, dest, AtomicOp::Max, AtomicRwOp::Relaxed)?,
1049 "atomic_umin" => this.atomic_op(args, dest, AtomicOp::Min, AtomicRwOp::SeqCst)?,
1050 "atomic_umin_acq" => this.atomic_op(args, dest, AtomicOp::Min, AtomicRwOp::Acquire)?,
1051 "atomic_umin_rel" => this.atomic_op(args, dest, AtomicOp::Min, AtomicRwOp::Release)?,
1052 "atomic_umin_acqrel" =>
1053 this.atomic_op(args, dest, AtomicOp::Min, AtomicRwOp::AcqRel)?,
1054 "atomic_umin_relaxed" =>
1055 this.atomic_op(args, dest, AtomicOp::Min, AtomicRwOp::Relaxed)?,
1056 "atomic_umax" => this.atomic_op(args, dest, AtomicOp::Max, AtomicRwOp::SeqCst)?,
1057 "atomic_umax_acq" => this.atomic_op(args, dest, AtomicOp::Max, AtomicRwOp::Acquire)?,
1058 "atomic_umax_rel" => this.atomic_op(args, dest, AtomicOp::Max, AtomicRwOp::Release)?,
1059 "atomic_umax_acqrel" =>
1060 this.atomic_op(args, dest, AtomicOp::Max, AtomicRwOp::AcqRel)?,
1061 "atomic_umax_relaxed" =>
1062 this.atomic_op(args, dest, AtomicOp::Max, AtomicRwOp::Relaxed)?,
1066 let [num, denom] = check_arg_count(args)?;
1067 this.exact_div(&this.read_immediate(num)?, &this.read_immediate(denom)?, dest)?;
1070 "try" => return this.handle_try(args, dest, ret),
1073 let [] = check_arg_count(args)?;
1074 // normally this would raise a SIGTRAP, which aborts if no debugger is connected
1075 throw_machine_stop!(TerminationInfo::Abort("Trace/breakpoint trap".to_string()))
1078 name => throw_unsup_format!("unimplemented intrinsic: {}", name),
1081 trace!("{:?}", this.dump_place(**dest));
1082 this.go_to_block(ret);
// Shim for the `atomic_load*` intrinsics: reads a scalar from the pointer
// operand with the given atomic read ordering and stores it into `dest`.
// NOTE(review): the `fn` signature line is elided in this view — presumably
// `fn atomic_load(&mut self, ...)`; confirm against the full file.
1088 args: &[OpTy<'tcx, Tag>],
1089 dest: &PlaceTy<'tcx, Tag>,
1090 atomic: AtomicReadOp,
1091 ) -> InterpResult<'tcx> {
1092 let this = self.eval_context_mut();
// Exactly one argument: the pointer to load from.
1094 let [place] = check_arg_count(args)?;
1095 let place = this.deref_operand(place)?;
1097 // make sure it fits into a scalar; otherwise it cannot be atomic
1098 let val = this.read_scalar_atomic(&place, atomic)?;
1100 // Check alignment requirements. Atomics must always be aligned to their size,
1101 // even if the type they wrap would be less aligned (e.g. AtomicU64 on 32bit must
// NOTE(review): comment continuation elided in this view — the intent is that
// alignment is derived from the value's *size*, not its type's declared align.
1103 let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
1104 this.check_ptr_access_align(
// (further call arguments — pointer, size, align — elided in this view)
1108 CheckInAllocMsg::MemoryAccessTest,
1110 // Perform regular access.
1111 this.write_scalar(val, dest)?;
// Shim for the `atomic_store*` intrinsics: writes the scalar operand to the
// pointer operand with the given atomic write ordering. No value is returned.
// NOTE(review): the `fn` signature line is elided in this view.
1117 args: &[OpTy<'tcx, Tag>],
1118 atomic: AtomicWriteOp,
1119 ) -> InterpResult<'tcx> {
1120 let this = self.eval_context_mut();
// Two arguments: destination pointer and the value to store.
1122 let [place, val] = check_arg_count(args)?;
1123 let place = this.deref_operand(place)?;
1124 let val = this.read_scalar(val)?; // make sure it fits into a scalar; otherwise it cannot be atomic
1126 // Check alignment requirements. Atomics must always be aligned to their size,
1127 // even if the type they wrap would be less aligned (e.g. AtomicU64 on 32bit must
// NOTE(review): comment continuation elided in this view; alignment is derived
// from the value's size below, matching the other atomic shims in this file.
1129 let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
1130 this.check_ptr_access_align(
// (further call arguments elided in this view)
1134 CheckInAllocMsg::MemoryAccessTest,
1137 // Perform atomic store
1138 this.write_scalar_atomic(val, &place, atomic)?;
// Shim for the `atomic_singlethreadfence*` (compiler fence) intrinsics.
// Takes no arguments; the ordering is encoded in the intrinsic name and passed
// in as `atomic`. NOTE(review): the `fn` signature line is elided in this view.
1144 args: &[OpTy<'tcx, Tag>],
1145 atomic: AtomicFenceOp,
1146 ) -> InterpResult<'tcx> {
// Validate that the intrinsic really was called with zero arguments.
1147 let [] = check_arg_count(args)?;
1149 //FIXME: compiler fences are currently ignored
// Shim for the `atomic_fence*` intrinsics: a full memory fence with the given
// ordering. Unlike the compiler fence above, this one is actually enforced by
// forwarding to the data-race detector via `validate_atomic_fence`.
// NOTE(review): the `fn` signature line is elided in this view.
1155 args: &[OpTy<'tcx, Tag>],
1156 atomic: AtomicFenceOp,
1157 ) -> InterpResult<'tcx> {
1158 let this = self.eval_context_mut();
// Fences take no arguments; the ordering comes from the intrinsic name.
1159 let [] = check_arg_count(args)?;
1160 this.validate_atomic_fence(atomic)?;
// Shim for the atomic read-modify-write intrinsics (`atomic_xadd*`,
// `atomic_and*`, `atomic_min*`, `atomic_umax*`, ...): applies `atomic_op` to
// the value behind the pointer operand and writes the *old* value into `dest`.
// NOTE(review): the `fn` signature line is elided in this view.
1166 args: &[OpTy<'tcx, Tag>],
1167 dest: &PlaceTy<'tcx, Tag>,
1168 atomic_op: AtomicOp,
1170 ) -> InterpResult<'tcx> {
1171 let this = self.eval_context_mut();
// Two arguments: the pointer and the right-hand-side operand.
1173 let [place, rhs] = check_arg_count(args)?;
1174 let place = this.deref_operand(place)?;
// These RMW intrinsics are only defined for integers; anything else is a
// compiler bug, not user error — hence `bug!` rather than an interp error.
1176 if !place.layout.ty.is_integral() {
1177 bug!("Atomic arithmetic operations only work on integer types");
1179 let rhs = this.read_immediate(rhs)?;
1181 // Check alignment requirements. Atomics must always be aligned to their size,
1182 // even if the type they wrap would be less aligned (e.g. AtomicU64 on 32bit must
// NOTE(review): comment continuation elided in this view.
1184 let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
1185 this.check_ptr_access_align(
// (further call arguments elided in this view)
1189 CheckInAllocMsg::MemoryAccessTest,
// Dispatch on the requested operation. NOTE(review): the `match atomic_op`
// head and the `AtomicOp::Max`/`AtomicOp::Min` arm patterns are elided in this
// view; the `true`/`false` flags below presumably select min vs. max — confirm
// against `atomic_min_max_scalar`'s signature.
1194 let old = this.atomic_min_max_scalar(&place, rhs, true, atomic)?;
1195 this.write_immediate(*old, dest)?; // old value is returned
1199 let old = this.atomic_min_max_scalar(&place, rhs, false, atomic)?;
1200 this.write_immediate(*old, dest)?; // old value is returned
// Generic case: apply a MIR binary op (add/sub/and/or/xor...), optionally
// negating the result first (`neg` covers `atomic_nand`).
1203 AtomicOp::MirOp(op, neg) => {
1204 let old = this.atomic_op_immediate(&place, &rhs, op, neg, atomic)?;
1205 this.write_immediate(*old, dest)?; // old value is returned
// Shim for the `atomic_xchg*` intrinsics: unconditionally swaps the value
// behind the pointer with `new` and writes the old value into `dest`.
// NOTE(review): the `fn` signature line is elided in this view.
1213 args: &[OpTy<'tcx, Tag>],
1214 dest: &PlaceTy<'tcx, Tag>,
1216 ) -> InterpResult<'tcx> {
1217 let this = self.eval_context_mut();
// Two arguments: the pointer and the replacement value.
1219 let [place, new] = check_arg_count(args)?;
1220 let place = this.deref_operand(place)?;
1221 let new = this.read_scalar(new)?;
1223 // Check alignment requirements. Atomics must always be aligned to their size,
1224 // even if the type they wrap would be less aligned (e.g. AtomicU64 on 32bit must
// NOTE(review): comment continuation elided in this view.
1226 let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
1227 this.check_ptr_access_align(
// (further call arguments elided in this view)
1231 CheckInAllocMsg::MemoryAccessTest,
1234 let old = this.atomic_exchange_scalar(&place, new, atomic)?;
1235 this.write_scalar(old, dest)?; // old value is returned
// Shared implementation for strong and weak compare-exchange intrinsics.
// Compares the value behind the pointer against `expect_old`; on success the
// value is replaced with `new` (using the `success` ordering). The old value
// (together with a success flag, packed by `atomic_compare_exchange_scalar`)
// is written to `dest`. `can_fail_spuriously` distinguishes the weak variant,
// which may fail even when the comparison succeeds.
1239 fn atomic_compare_exchange_impl(
// NOTE(review): `&mut self` and the failure-ordering parameter (`fail`, used
// below at lines 1286/1296) are elided in this view.
1241 args: &[OpTy<'tcx, Tag>],
1242 dest: &PlaceTy<'tcx, Tag>,
1243 success: AtomicRwOp,
1245 can_fail_spuriously: bool,
1246 ) -> InterpResult<'tcx> {
1247 let this = self.eval_context_mut();
// Three arguments: pointer, expected old value, and replacement value.
1249 let [place, expect_old, new] = check_arg_count(args)?;
1250 let place = this.deref_operand(place)?;
1251 let expect_old = this.read_immediate(expect_old)?; // read as immediate for the sake of `binary_op()`
1252 let new = this.read_scalar(new)?;
1254 // Check alignment requirements. Atomics must always be aligned to their size,
1255 // even if the type they wrap would be less aligned (e.g. AtomicU64 on 32bit must
// NOTE(review): comment continuation elided in this view.
1257 let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
1258 this.check_ptr_access_align(
// (further call arguments elided in this view)
1262 CheckInAllocMsg::MemoryAccessTest,
// Delegate the actual CAS to the machine; it handles orderings and the
// data-race detector. (Intermediate arguments elided in this view.)
1265 let old = this.atomic_compare_exchange_scalar(
1271 can_fail_spuriously,
1274 // Return old value.
1275 this.write_immediate(old, dest)?;
// Shim for the strong `atomic_cxchg*` intrinsics: a compare-exchange that
// never fails spuriously (hence the trailing `false`). Thin wrapper over
// `atomic_compare_exchange_impl`.
1279 fn atomic_compare_exchange(
// NOTE(review): `&mut self` and the `fail: AtomicReadOp` parameter are elided
// in this view; `fail` is forwarded below.
1281 args: &[OpTy<'tcx, Tag>],
1282 dest: &PlaceTy<'tcx, Tag>,
1283 success: AtomicRwOp,
1285 ) -> InterpResult<'tcx> {
1286 self.atomic_compare_exchange_impl(args, dest, success, fail, false)
// Shim for the weak `atomic_cxchgweak*` intrinsics: identical to the strong
// variant except that spurious failure is permitted (the trailing `true`).
1289 fn atomic_compare_exchange_weak(
// NOTE(review): `&mut self` and the `fail: AtomicReadOp` parameter are elided
// in this view; `fail` is forwarded below.
1291 args: &[OpTy<'tcx, Tag>],
1292 dest: &PlaceTy<'tcx, Tag>,
1293 success: AtomicRwOp,
1295 ) -> InterpResult<'tcx> {
1296 self.atomic_compare_exchange_impl(args, dest, success, fail, true)
// Shim helper for the `float_to_int_unchecked` intrinsic: converts a float
// `f` (elided parameter) to the integer type `dest_ty`, raising UB if the
// truncated value is not exactly representable in the target type.
1299 fn float_to_int_unchecked<F>(
// NOTE(review): `&self` and the `f: F` parameter lines are elided in this view.
1302 dest_ty: ty::Ty<'tcx>,
1303 ) -> InterpResult<'tcx, Scalar<Tag>>
// Bound: any apfloat that can be turned into a Miri scalar.
1305 F: Float + Into<Scalar<Tag>>,
1307 let this = self.eval_context_ref();
1309 // Step 1: cut off the fractional part of `f`. The result of this is
1310 // guaranteed to be precisely representable in IEEE floats.
1311 let f = f.round_to_integral(Round::TowardZero).value;
1313 // Step 2: Cast the truncated float to the target integer type and see if we lose any information in this step.
1314 Ok(match dest_ty.kind() {
// Unsigned target (the `ty::Uint(t)` arm pattern is elided in this view).
1317 let size = Integer::from_uint_ty(this, *t).size();
1318 let res = f.to_u128(size.bits_usize());
1319 if res.status.is_empty() {
1320 // No status flags means there was no further rounding or other loss of precision.
1321 Scalar::from_uint(res.value, size)
1323 // `f` was not representable in this integer type.
// Out-of-range value: this raises undefined behavior (the surrounding
// `throw_ub_format!` invocation is elided in this view).
1325 "`float_to_int_unchecked` intrinsic called on {} which cannot be represented in target type `{:?}`",
// Signed target (the `ty::Int(t)` arm pattern is elided in this view) —
// mirrors the unsigned case above using `to_i128`.
1333 let size = Integer::from_int_ty(this, *t).size();
1334 let res = f.to_i128(size.bits_usize());
1335 if res.status.is_empty() {
1336 // No status flags means there was no further rounding or other loss of precision.
1337 Scalar::from_int(res.value, size)
1339 // `f` was not representable in this integer type.
1341 "`float_to_int_unchecked` intrinsic called on {} which cannot be represented in target type `{:?}`",
// A non-integer destination type is a compiler bug, not user error.
1348 _ => bug!("`float_to_int_unchecked` called with non-int output type {:?}", dest_ty),
// Helper for float `max` operations: computes IEEE maximum of two float
// immediates of the same type (F32 or F64) via apfloat semantics.
// NOTE(review): the `fn` signature line is elided in this view.
1354 left: &ImmTy<'tcx, Tag>,
1355 right: &ImmTy<'tcx, Tag>,
1356 ) -> InterpResult<'tcx, Scalar<Tag>> {
// Both operands must have the same float type; mismatch is a caller bug.
1357 assert_eq!(left.layout.ty, right.layout.ty);
1358 let ty::Float(float_ty) = left.layout.ty.kind() else {
1359 bug!("fmax operand is not a float")
1361 let left = left.to_scalar()?;
1362 let right = right.to_scalar()?;
// Dispatch on bit-width (the `match float_ty` head is elided in this view).
1364 FloatTy::F32 => Scalar::from_f32(left.to_f32()?.max(right.to_f32()?)),
1365 FloatTy::F64 => Scalar::from_f64(left.to_f64()?.max(right.to_f64()?)),
// Helper for float `min` operations: exact mirror of `fmax_op` above, using
// apfloat `min` instead of `max`.
// NOTE(review): the `fn` signature line is elided in this view.
1370 left: &ImmTy<'tcx, Tag>,
1371 right: &ImmTy<'tcx, Tag>,
1372 ) -> InterpResult<'tcx, Scalar<Tag>> {
// Both operands must have the same float type; mismatch is a caller bug.
1373 assert_eq!(left.layout.ty, right.layout.ty);
1374 let ty::Float(float_ty) = left.layout.ty.kind() else {
1375 bug!("fmin operand is not a float")
1377 let left = left.to_scalar()?;
1378 let right = right.to_scalar()?;
// Dispatch on bit-width (the `match float_ty` head is elided in this view).
1380 FloatTy::F32 => Scalar::from_f32(left.to_f32()?.min(right.to_f32()?)),
1381 FloatTy::F64 => Scalar::from_f64(left.to_f64()?.min(right.to_f64()?)),
// Converts a Rust `bool` into a SIMD mask element of the given integer size:
// `true` becomes all-1-bits (-1 as a signed integer of that size), `false`
// becomes all-0-bits. Inverse of `simd_element_to_bool` below.
1385 fn bool_to_simd_element(b: bool, size: Size) -> Scalar<Tag> {
1386 // SIMD uses all-1 as pattern for "true"
1387 let val = if b { -1 } else { 0 };
// `from_int` sign-extends/truncates `val` to `size`, so -1 yields all-1-bits
// at any element width.
1388 Scalar::from_int(val, size)
// Converts a SIMD mask element back into a `bool`: all-0-bits is `false`,
// all-1-bits (-1) is `true`, and anything else is undefined behavior.
// Inverse of `bool_to_simd_element` above.
1391 fn simd_element_to_bool<'tcx>(elem: ImmTy<'tcx, Tag>) -> InterpResult<'tcx, bool> {
1392 let val = elem.to_scalar()?.to_int(elem.layout.size)?;
// NOTE(review): the `match val` head and the `0 => false` / `-1 => true` arms
// are elided in this view; only the catch-all UB arm is visible.
1396 _ => throw_ub_format!("each element of a SIMD mask must be all-0-bits or all-1-bits"),
1400 fn simd_bitmask_index(idx: u64, vec_len: u64, endianess: Endian) -> u64 {
1401 assert!(idx < vec_len);
1403 Endian::Little => idx,
1404 Endian::Big => vec_len - 1 - idx, // reverse order of bits