1 use std::convert::TryInto;
6 use rustc_apfloat::{Float, Round};
7 use rustc_middle::ty::layout::{HasParamEnv, IntegerExt, LayoutOf};
8 use rustc_middle::{mir, mir::BinOp, ty, ty::FloatTy};
9 use rustc_target::abi::{Align, Endian, HasDataLayout, Integer, Size};
12 use helpers::check_arg_count;
15 MirOp(mir::BinOp, bool),
// Blanket impl: attaches every `EvalContextExt` helper (the trait's methods carry
// default bodies below) to `MiriEvalContext`, so the interpreter can call e.g.
// `this.call_intrinsic(...)` directly. The impl body is intentionally empty.
20 impl<'mir, 'tcx: 'mir> EvalContextExt<'mir, 'tcx> for crate::MiriEvalContext<'mir, 'tcx> {}
21 pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx> {
24 instance: ty::Instance<'tcx>,
25 args: &[OpTy<'tcx, Tag>],
26 ret: Option<(&PlaceTy<'tcx, Tag>, mir::BasicBlock)>,
27 _unwind: StackPopUnwind,
28 ) -> InterpResult<'tcx> {
29 let this = self.eval_context_mut();
31 if this.emulate_intrinsic(instance, args, ret)? {
35 // All supported intrinsics have a return place.
36 let intrinsic_name = this.tcx.item_name(instance.def_id());
37 let intrinsic_name = intrinsic_name.as_str();
38 let (dest, ret) = match ret {
39 None => throw_unsup_format!("unimplemented (diverging) intrinsic: {}", intrinsic_name),
43 // Then handle terminating intrinsics.
44 match intrinsic_name {
45 // Miri overwriting CTFE intrinsics.
46 "ptr_guaranteed_eq" => {
47 let &[ref left, ref right] = check_arg_count(args)?;
48 let left = this.read_immediate(left)?;
49 let right = this.read_immediate(right)?;
50 this.binop_ignore_overflow(mir::BinOp::Eq, &left, &right, dest)?;
52 "ptr_guaranteed_ne" => {
53 let &[ref left, ref right] = check_arg_count(args)?;
54 let left = this.read_immediate(left)?;
55 let right = this.read_immediate(right)?;
56 this.binop_ignore_overflow(mir::BinOp::Ne, &left, &right, dest)?;
59 // For now, for compatibility with the run-time implementation of this, we just return null.
60 // See <https://github.com/rust-lang/rust/issues/93935>.
61 this.write_null(dest)?;
63 "const_deallocate" => {
67 // Raw memory accesses
69 let &[ref place] = check_arg_count(args)?;
70 let place = this.deref_operand(place)?;
71 this.copy_op(&place.into(), dest)?;
74 let &[ref place, ref dest] = check_arg_count(args)?;
75 let place = this.deref_operand(place)?;
76 this.copy_op(dest, &place.into())?;
79 "write_bytes" | "volatile_set_memory" => {
80 let &[ref ptr, ref val_byte, ref count] = check_arg_count(args)?;
81 let ty = instance.substs.type_at(0);
82 let ty_layout = this.layout_of(ty)?;
83 let val_byte = this.read_scalar(val_byte)?.to_u8()?;
84 let ptr = this.read_pointer(ptr)?;
85 let count = this.read_scalar(count)?.to_machine_usize(this)?;
86 let byte_count = ty_layout.size.checked_mul(count, this).ok_or_else(|| {
87 err_ub_format!("overflow computing total size of `{}`", intrinsic_name)
90 .write_bytes(ptr, iter::repeat(val_byte).take(byte_count.bytes() as usize))?;
93 // Floating-point operations
95 let &[ref f] = check_arg_count(args)?;
96 let f = this.read_scalar(f)?.to_f32()?;
97 // Can be implemented in soft-floats.
98 this.write_scalar(Scalar::from_f32(f.abs()), dest)?;
101 let &[ref f] = check_arg_count(args)?;
102 let f = this.read_scalar(f)?.to_f64()?;
103 // Can be implemented in soft-floats.
104 this.write_scalar(Scalar::from_f64(f.abs()), dest)?;
120 let &[ref f] = check_arg_count(args)?;
121 // FIXME: Using host floats.
122 let f = f32::from_bits(this.read_scalar(f)?.to_u32()?);
123 let f = match intrinsic_name {
126 "sqrtf32" => f.sqrt(),
128 "exp2f32" => f.exp2(),
130 "log10f32" => f.log10(),
131 "log2f32" => f.log2(),
132 "floorf32" => f.floor(),
133 "ceilf32" => f.ceil(),
134 "truncf32" => f.trunc(),
135 "roundf32" => f.round(),
138 this.write_scalar(Scalar::from_u32(f.to_bits()), dest)?;
155 let &[ref f] = check_arg_count(args)?;
156 // FIXME: Using host floats.
157 let f = f64::from_bits(this.read_scalar(f)?.to_u64()?);
158 let f = match intrinsic_name {
161 "sqrtf64" => f.sqrt(),
163 "exp2f64" => f.exp2(),
165 "log10f64" => f.log10(),
166 "log2f64" => f.log2(),
167 "floorf64" => f.floor(),
168 "ceilf64" => f.ceil(),
169 "truncf64" => f.trunc(),
170 "roundf64" => f.round(),
173 this.write_scalar(Scalar::from_u64(f.to_bits()), dest)?;
183 let &[ref a, ref b] = check_arg_count(args)?;
184 let a = this.read_immediate(a)?;
185 let b = this.read_immediate(b)?;
186 let op = match intrinsic_name {
187 "fadd_fast" => mir::BinOp::Add,
188 "fsub_fast" => mir::BinOp::Sub,
189 "fmul_fast" => mir::BinOp::Mul,
190 "fdiv_fast" => mir::BinOp::Div,
191 "frem_fast" => mir::BinOp::Rem,
194 let float_finite = |x: ImmTy<'tcx, _>| -> InterpResult<'tcx, bool> {
195 Ok(match x.layout.ty.kind() {
196 ty::Float(FloatTy::F32) => x.to_scalar()?.to_f32()?.is_finite(),
197 ty::Float(FloatTy::F64) => x.to_scalar()?.to_f64()?.is_finite(),
199 "`{}` called with non-float input type {:?}",
205 match (float_finite(a)?, float_finite(b)?) {
206 (false, false) => throw_ub_format!(
207 "`{}` intrinsic called with non-finite value as both parameters",
210 (false, _) => throw_ub_format!(
211 "`{}` intrinsic called with non-finite value as first parameter",
214 (_, false) => throw_ub_format!(
215 "`{}` intrinsic called with non-finite value as second parameter",
220 this.binop_ignore_overflow(op, &a, &b, dest)?;
228 let &[ref a, ref b] = check_arg_count(args)?;
229 let a = this.read_scalar(a)?.to_f32()?;
230 let b = this.read_scalar(b)?.to_f32()?;
231 let res = match intrinsic_name {
232 "minnumf32" => a.min(b),
233 "maxnumf32" => a.max(b),
234 "copysignf32" => a.copy_sign(b),
237 this.write_scalar(Scalar::from_f32(res), dest)?;
245 let &[ref a, ref b] = check_arg_count(args)?;
246 let a = this.read_scalar(a)?.to_f64()?;
247 let b = this.read_scalar(b)?.to_f64()?;
248 let res = match intrinsic_name {
249 "minnumf64" => a.min(b),
250 "maxnumf64" => a.max(b),
251 "copysignf64" => a.copy_sign(b),
254 this.write_scalar(Scalar::from_f64(res), dest)?;
258 let &[ref f, ref f2] = check_arg_count(args)?;
259 // FIXME: Using host floats.
260 let f = f32::from_bits(this.read_scalar(f)?.to_u32()?);
261 let f2 = f32::from_bits(this.read_scalar(f2)?.to_u32()?);
262 this.write_scalar(Scalar::from_u32(f.powf(f2).to_bits()), dest)?;
266 let &[ref f, ref f2] = check_arg_count(args)?;
267 // FIXME: Using host floats.
268 let f = f64::from_bits(this.read_scalar(f)?.to_u64()?);
269 let f2 = f64::from_bits(this.read_scalar(f2)?.to_u64()?);
270 this.write_scalar(Scalar::from_u64(f.powf(f2).to_bits()), dest)?;
274 let &[ref a, ref b, ref c] = check_arg_count(args)?;
275 let a = this.read_scalar(a)?.to_f32()?;
276 let b = this.read_scalar(b)?.to_f32()?;
277 let c = this.read_scalar(c)?.to_f32()?;
278 let res = a.mul_add(b, c).value;
279 this.write_scalar(Scalar::from_f32(res), dest)?;
283 let &[ref a, ref b, ref c] = check_arg_count(args)?;
284 let a = this.read_scalar(a)?.to_f64()?;
285 let b = this.read_scalar(b)?.to_f64()?;
286 let c = this.read_scalar(c)?.to_f64()?;
287 let res = a.mul_add(b, c).value;
288 this.write_scalar(Scalar::from_f64(res), dest)?;
292 let &[ref f, ref i] = check_arg_count(args)?;
293 // FIXME: Using host floats.
294 let f = f32::from_bits(this.read_scalar(f)?.to_u32()?);
295 let i = this.read_scalar(i)?.to_i32()?;
296 this.write_scalar(Scalar::from_u32(f.powi(i).to_bits()), dest)?;
300 let &[ref f, ref i] = check_arg_count(args)?;
301 // FIXME: Using host floats.
302 let f = f64::from_bits(this.read_scalar(f)?.to_u64()?);
303 let i = this.read_scalar(i)?.to_i32()?;
304 this.write_scalar(Scalar::from_u64(f.powi(i).to_bits()), dest)?;
307 "float_to_int_unchecked" => {
308 let &[ref val] = check_arg_count(args)?;
309 let val = this.read_immediate(val)?;
311 let res = match val.layout.ty.kind() {
312 ty::Float(FloatTy::F32) =>
313 this.float_to_int_unchecked(val.to_scalar()?.to_f32()?, dest.layout.ty)?,
314 ty::Float(FloatTy::F64) =>
315 this.float_to_int_unchecked(val.to_scalar()?.to_f64()?, dest.layout.ty)?,
318 "`float_to_int_unchecked` called with non-float input type {:?}",
323 this.write_scalar(res, dest)?;
335 let &[ref op] = check_arg_count(args)?;
336 let (op, op_len) = this.operand_to_simd(op)?;
337 let (dest, dest_len) = this.place_to_simd(dest)?;
339 assert_eq!(dest_len, op_len);
341 #[derive(Copy, Clone)]
349 #[derive(Copy, Clone)]
355 let which = match intrinsic_name {
356 "simd_neg" => Op::MirOp(mir::UnOp::Neg),
357 "simd_fabs" => Op::Abs,
358 "simd_ceil" => Op::HostOp(HostFloatOp::Ceil),
359 "simd_floor" => Op::HostOp(HostFloatOp::Floor),
360 "simd_round" => Op::HostOp(HostFloatOp::Round),
361 "simd_trunc" => Op::HostOp(HostFloatOp::Trunc),
362 "simd_fsqrt" => Op::HostOp(HostFloatOp::Sqrt),
366 for i in 0..dest_len {
367 let op = this.read_immediate(&this.mplace_index(&op, i)?.into())?;
368 let dest = this.mplace_index(&dest, i)?;
369 let val = match which {
370 Op::MirOp(mir_op) => this.unary_op(mir_op, &op)?.to_scalar()?,
372 // Works for f32 and f64.
373 let ty::Float(float_ty) = op.layout.ty.kind() else {
374 bug!("{} operand is not a float", intrinsic_name)
376 let op = op.to_scalar()?;
378 FloatTy::F32 => Scalar::from_f32(op.to_f32()?.abs()),
379 FloatTy::F64 => Scalar::from_f64(op.to_f64()?.abs()),
382 Op::HostOp(host_op) => {
383 let ty::Float(float_ty) = op.layout.ty.kind() else {
384 bug!("{} operand is not a float", intrinsic_name)
386 // FIXME using host floats
389 let f = f32::from_bits(op.to_scalar()?.to_u32()?);
390 let res = match host_op {
391 HostFloatOp::Ceil => f.ceil(),
392 HostFloatOp::Floor => f.floor(),
393 HostFloatOp::Round => f.round(),
394 HostFloatOp::Trunc => f.trunc(),
395 HostFloatOp::Sqrt => f.sqrt(),
397 Scalar::from_u32(res.to_bits())
400 let f = f64::from_bits(op.to_scalar()?.to_u64()?);
401 let res = match host_op {
402 HostFloatOp::Ceil => f.ceil(),
403 HostFloatOp::Floor => f.floor(),
404 HostFloatOp::Round => f.round(),
405 HostFloatOp::Trunc => f.trunc(),
406 HostFloatOp::Sqrt => f.sqrt(),
408 Scalar::from_u64(res.to_bits())
414 this.write_scalar(val, &dest.into())?;
436 | "simd_saturating_add"
437 | "simd_saturating_sub" => {
440 let &[ref left, ref right] = check_arg_count(args)?;
441 let (left, left_len) = this.operand_to_simd(left)?;
442 let (right, right_len) = this.operand_to_simd(right)?;
443 let (dest, dest_len) = this.place_to_simd(dest)?;
445 assert_eq!(dest_len, left_len);
446 assert_eq!(dest_len, right_len);
454 let which = match intrinsic_name {
455 "simd_add" => Op::MirOp(BinOp::Add),
456 "simd_sub" => Op::MirOp(BinOp::Sub),
457 "simd_mul" => Op::MirOp(BinOp::Mul),
458 "simd_div" => Op::MirOp(BinOp::Div),
459 "simd_rem" => Op::MirOp(BinOp::Rem),
460 "simd_shl" => Op::MirOp(BinOp::Shl),
461 "simd_shr" => Op::MirOp(BinOp::Shr),
462 "simd_and" => Op::MirOp(BinOp::BitAnd),
463 "simd_or" => Op::MirOp(BinOp::BitOr),
464 "simd_xor" => Op::MirOp(BinOp::BitXor),
465 "simd_eq" => Op::MirOp(BinOp::Eq),
466 "simd_ne" => Op::MirOp(BinOp::Ne),
467 "simd_lt" => Op::MirOp(BinOp::Lt),
468 "simd_le" => Op::MirOp(BinOp::Le),
469 "simd_gt" => Op::MirOp(BinOp::Gt),
470 "simd_ge" => Op::MirOp(BinOp::Ge),
471 "simd_fmax" => Op::FMax,
472 "simd_fmin" => Op::FMin,
473 "simd_saturating_add" => Op::SaturatingOp(BinOp::Add),
474 "simd_saturating_sub" => Op::SaturatingOp(BinOp::Sub),
478 for i in 0..dest_len {
479 let left = this.read_immediate(&this.mplace_index(&left, i)?.into())?;
480 let right = this.read_immediate(&this.mplace_index(&right, i)?.into())?;
481 let dest = this.mplace_index(&dest, i)?;
482 let val = match which {
483 Op::MirOp(mir_op) => {
484 let (val, overflowed, ty) = this.overflowing_binary_op(mir_op, &left, &right)?;
485 if matches!(mir_op, BinOp::Shl | BinOp::Shr) {
486 // Shifts have extra UB as SIMD operations that the MIR binop does not have.
487 // See <https://github.com/rust-lang/rust/issues/91237>.
489 let r_val = right.to_scalar()?.to_bits(right.layout.size)?;
490 throw_ub_format!("overflowing shift by {} in `{}` in SIMD lane {}", r_val, intrinsic_name, i);
493 if matches!(mir_op, BinOp::Eq | BinOp::Ne | BinOp::Lt | BinOp::Le | BinOp::Gt | BinOp::Ge) {
494 // Special handling for boolean-returning operations
495 assert_eq!(ty, this.tcx.types.bool);
496 let val = val.to_bool().unwrap();
497 bool_to_simd_element(val, dest.layout.size)
499 assert_ne!(ty, this.tcx.types.bool);
500 assert_eq!(ty, dest.layout.ty);
505 fmax_op(&left, &right)?
508 fmin_op(&left, &right)?
510 Op::SaturatingOp(mir_op) => {
511 this.saturating_arith(mir_op, &left, &right)?
514 this.write_scalar(val, &dest.into())?;
518 let &[ref a, ref b, ref c] = check_arg_count(args)?;
519 let (a, a_len) = this.operand_to_simd(a)?;
520 let (b, b_len) = this.operand_to_simd(b)?;
521 let (c, c_len) = this.operand_to_simd(c)?;
522 let (dest, dest_len) = this.place_to_simd(dest)?;
524 assert_eq!(dest_len, a_len);
525 assert_eq!(dest_len, b_len);
526 assert_eq!(dest_len, c_len);
528 for i in 0..dest_len {
529 let a = this.read_immediate(&this.mplace_index(&a, i)?.into())?.to_scalar()?;
530 let b = this.read_immediate(&this.mplace_index(&b, i)?.into())?.to_scalar()?;
531 let c = this.read_immediate(&this.mplace_index(&c, i)?.into())?.to_scalar()?;
532 let dest = this.mplace_index(&dest, i)?;
534 // Works for f32 and f64.
535 let ty::Float(float_ty) = dest.layout.ty.kind() else {
536 bug!("{} operand is not a float", intrinsic_name)
538 let val = match float_ty {
540 Scalar::from_f32(a.to_f32()?.mul_add(b.to_f32()?, c.to_f32()?).value),
542 Scalar::from_f64(a.to_f64()?.mul_add(b.to_f64()?, c.to_f64()?).value),
544 this.write_scalar(val, &dest.into())?;
554 | "simd_reduce_min" => {
557 let &[ref op] = check_arg_count(args)?;
558 let (op, op_len) = this.operand_to_simd(op)?;
561 |b| ImmTy::from_scalar(Scalar::from_bool(b), this.machine.layouts.bool);
569 let which = match intrinsic_name {
570 "simd_reduce_and" => Op::MirOp(BinOp::BitAnd),
571 "simd_reduce_or" => Op::MirOp(BinOp::BitOr),
572 "simd_reduce_xor" => Op::MirOp(BinOp::BitXor),
573 "simd_reduce_any" => Op::MirOpBool(BinOp::BitOr),
574 "simd_reduce_all" => Op::MirOpBool(BinOp::BitAnd),
575 "simd_reduce_max" => Op::Max,
576 "simd_reduce_min" => Op::Min,
580 // Initialize with first lane, then proceed with the rest.
581 let mut res = this.read_immediate(&this.mplace_index(&op, 0)?.into())?;
582 if matches!(which, Op::MirOpBool(_)) {
583 // Convert to `bool` scalar.
584 res = imm_from_bool(simd_element_to_bool(res)?);
587 let op = this.read_immediate(&this.mplace_index(&op, i)?.into())?;
589 Op::MirOp(mir_op) => {
590 this.binary_op(mir_op, &res, &op)?
592 Op::MirOpBool(mir_op) => {
593 let op = imm_from_bool(simd_element_to_bool(op)?);
594 this.binary_op(mir_op, &res, &op)?
597 if matches!(res.layout.ty.kind(), ty::Float(_)) {
598 ImmTy::from_scalar(fmax_op(&res, &op)?, res.layout)
600 // Just boring integers, so NaNs to worry about
601 if this.binary_op(BinOp::Ge, &res, &op)?.to_scalar()?.to_bool()? {
609 if matches!(res.layout.ty.kind(), ty::Float(_)) {
610 ImmTy::from_scalar(fmin_op(&res, &op)?, res.layout)
612 // Just boring integers, so NaNs to worry about
613 if this.binary_op(BinOp::Le, &res, &op)?.to_scalar()?.to_bool()? {
622 this.write_immediate(*res, dest)?;
625 | "simd_reduce_add_ordered"
626 | "simd_reduce_mul_ordered" => {
629 let &[ref op, ref init] = check_arg_count(args)?;
630 let (op, op_len) = this.operand_to_simd(op)?;
631 let init = this.read_immediate(init)?;
633 let mir_op = match intrinsic_name {
634 "simd_reduce_add_ordered" => BinOp::Add,
635 "simd_reduce_mul_ordered" => BinOp::Mul,
641 let op = this.read_immediate(&this.mplace_index(&op, i)?.into())?;
642 res = this.binary_op(mir_op, &res, &op)?;
644 this.write_immediate(*res, dest)?;
647 let &[ref mask, ref yes, ref no] = check_arg_count(args)?;
648 let (mask, mask_len) = this.operand_to_simd(mask)?;
649 let (yes, yes_len) = this.operand_to_simd(yes)?;
650 let (no, no_len) = this.operand_to_simd(no)?;
651 let (dest, dest_len) = this.place_to_simd(dest)?;
653 assert_eq!(dest_len, mask_len);
654 assert_eq!(dest_len, yes_len);
655 assert_eq!(dest_len, no_len);
657 for i in 0..dest_len {
658 let mask = this.read_immediate(&this.mplace_index(&mask, i)?.into())?;
659 let yes = this.read_immediate(&this.mplace_index(&yes, i)?.into())?;
660 let no = this.read_immediate(&this.mplace_index(&no, i)?.into())?;
661 let dest = this.mplace_index(&dest, i)?;
663 let val = if simd_element_to_bool(mask)? { yes } else { no };
664 this.write_immediate(*val, &dest.into())?;
667 "simd_select_bitmask" => {
668 let &[ref mask, ref yes, ref no] = check_arg_count(args)?;
669 let (yes, yes_len) = this.operand_to_simd(yes)?;
670 let (no, no_len) = this.operand_to_simd(no)?;
671 let (dest, dest_len) = this.place_to_simd(dest)?;
673 assert!(mask.layout.ty.is_integral());
674 assert_eq!(dest_len.max(8), mask.layout.size.bits());
675 assert!(dest_len <= 64);
676 assert_eq!(dest_len, yes_len);
677 assert_eq!(dest_len, no_len);
682 .to_bits(mask.layout.size)?
685 for i in 0..dest_len {
687 mask & (1 << simd_bitmask_index(i, dest_len, this.data_layout().endian));
688 let yes = this.read_immediate(&this.mplace_index(&yes, i)?.into())?;
689 let no = this.read_immediate(&this.mplace_index(&no, i)?.into())?;
690 let dest = this.mplace_index(&dest, i)?;
692 let val = if mask != 0 { yes } else { no };
693 this.write_immediate(*val, &dest.into())?;
697 "simd_cast" | "simd_as" => {
698 let &[ref op] = check_arg_count(args)?;
699 let (op, op_len) = this.operand_to_simd(op)?;
700 let (dest, dest_len) = this.place_to_simd(dest)?;
702 assert_eq!(dest_len, op_len);
704 let safe_cast = intrinsic_name == "simd_as";
706 for i in 0..dest_len {
707 let op = this.read_immediate(&this.mplace_index(&op, i)?.into())?;
708 let dest = this.mplace_index(&dest, i)?;
710 let val = match (op.layout.ty.kind(), dest.layout.ty.kind()) {
711 // Int-to-(int|float): always safe
712 (ty::Int(_) | ty::Uint(_), ty::Int(_) | ty::Uint(_) | ty::Float(_)) =>
713 this.misc_cast(&op, dest.layout.ty)?,
714 // Float-to-float: always safe
715 (ty::Float(_), ty::Float(_)) =>
716 this.misc_cast(&op, dest.layout.ty)?,
717 // Float-to-int in safe mode
718 (ty::Float(_), ty::Int(_) | ty::Uint(_)) if safe_cast =>
719 this.misc_cast(&op, dest.layout.ty)?,
720 // Float-to-int in unchecked mode
721 (ty::Float(FloatTy::F32), ty::Int(_) | ty::Uint(_)) if !safe_cast =>
722 this.float_to_int_unchecked(op.to_scalar()?.to_f32()?, dest.layout.ty)?.into(),
723 (ty::Float(FloatTy::F64), ty::Int(_) | ty::Uint(_)) if !safe_cast =>
724 this.float_to_int_unchecked(op.to_scalar()?.to_f64()?, dest.layout.ty)?.into(),
727 "Unsupported SIMD cast from element type {} to {}",
732 this.write_immediate(val, &dest.into())?;
736 let &[ref left, ref right, ref index] = check_arg_count(args)?;
737 let (left, left_len) = this.operand_to_simd(left)?;
738 let (right, right_len) = this.operand_to_simd(right)?;
739 let (dest, dest_len) = this.place_to_simd(dest)?;
741 // `index` is an array, not a SIMD type
742 let ty::Array(_, index_len) = index.layout.ty.kind() else {
743 bug!("simd_shuffle index argument has non-array type {}", index.layout.ty)
745 let index_len = index_len.eval_usize(*this.tcx, this.param_env());
747 assert_eq!(left_len, right_len);
748 assert_eq!(index_len, dest_len);
750 for i in 0..dest_len {
751 let src_index: u64 = this
752 .read_immediate(&this.operand_index(&index, i)?.into())?
756 let dest = this.mplace_index(&dest, i)?;
758 let val = if src_index < left_len {
759 this.read_immediate(&this.mplace_index(&left, src_index)?.into())?
760 } else if src_index < left_len.checked_add(right_len).unwrap() {
762 &this.mplace_index(&right, src_index - left_len)?.into(),
766 "simd_shuffle index {} is out of bounds for 2 vectors of size {}",
771 this.write_immediate(*val, &dest.into())?;
775 let &[ref passthru, ref ptrs, ref mask] = check_arg_count(args)?;
776 let (passthru, passthru_len) = this.operand_to_simd(passthru)?;
777 let (ptrs, ptrs_len) = this.operand_to_simd(ptrs)?;
778 let (mask, mask_len) = this.operand_to_simd(mask)?;
779 let (dest, dest_len) = this.place_to_simd(dest)?;
781 assert_eq!(dest_len, passthru_len);
782 assert_eq!(dest_len, ptrs_len);
783 assert_eq!(dest_len, mask_len);
785 for i in 0..dest_len {
786 let passthru = this.read_immediate(&this.mplace_index(&passthru, i)?.into())?;
787 let ptr = this.read_immediate(&this.mplace_index(&ptrs, i)?.into())?;
788 let mask = this.read_immediate(&this.mplace_index(&mask, i)?.into())?;
789 let dest = this.mplace_index(&dest, i)?;
791 let val = if simd_element_to_bool(mask)? {
792 let place = this.deref_operand(&ptr.into())?;
793 this.read_immediate(&place.into())?
797 this.write_immediate(*val, &dest.into())?;
801 let &[ref value, ref ptrs, ref mask] = check_arg_count(args)?;
802 let (value, value_len) = this.operand_to_simd(value)?;
803 let (ptrs, ptrs_len) = this.operand_to_simd(ptrs)?;
804 let (mask, mask_len) = this.operand_to_simd(mask)?;
806 assert_eq!(ptrs_len, value_len);
807 assert_eq!(ptrs_len, mask_len);
809 for i in 0..ptrs_len {
810 let value = this.read_immediate(&this.mplace_index(&value, i)?.into())?;
811 let ptr = this.read_immediate(&this.mplace_index(&ptrs, i)?.into())?;
812 let mask = this.read_immediate(&this.mplace_index(&mask, i)?.into())?;
814 if simd_element_to_bool(mask)? {
815 let place = this.deref_operand(&ptr.into())?;
816 this.write_immediate(*value, &place.into())?;
821 let &[ref op] = check_arg_count(args)?;
822 let (op, op_len) = this.operand_to_simd(op)?;
824 assert!(dest.layout.ty.is_integral());
825 assert_eq!(op_len.max(8), dest.layout.size.bits());
826 assert!(op_len <= 64);
830 let op = this.read_immediate(&this.mplace_index(&op, i)?.into())?;
831 if simd_element_to_bool(op)? {
832 res |= 1 << simd_bitmask_index(i, op_len, this.data_layout().endian);
835 this.write_int(res, dest)?;
839 "atomic_load" => this.atomic_load(args, dest, AtomicReadOp::SeqCst)?,
840 "atomic_load_relaxed" => this.atomic_load(args, dest, AtomicReadOp::Relaxed)?,
841 "atomic_load_acq" => this.atomic_load(args, dest, AtomicReadOp::Acquire)?,
843 "atomic_store" => this.atomic_store(args, AtomicWriteOp::SeqCst)?,
844 "atomic_store_relaxed" => this.atomic_store(args, AtomicWriteOp::Relaxed)?,
845 "atomic_store_rel" => this.atomic_store(args, AtomicWriteOp::Release)?,
847 "atomic_fence_acq" => this.atomic_fence(args, AtomicFenceOp::Acquire)?,
848 "atomic_fence_rel" => this.atomic_fence(args, AtomicFenceOp::Release)?,
849 "atomic_fence_acqrel" => this.atomic_fence(args, AtomicFenceOp::AcqRel)?,
850 "atomic_fence" => this.atomic_fence(args, AtomicFenceOp::SeqCst)?,
852 "atomic_singlethreadfence_acq" => this.compiler_fence(args, AtomicFenceOp::Acquire)?,
853 "atomic_singlethreadfence_rel" => this.compiler_fence(args, AtomicFenceOp::Release)?,
854 "atomic_singlethreadfence_acqrel" =>
855 this.compiler_fence(args, AtomicFenceOp::AcqRel)?,
856 "atomic_singlethreadfence" => this.compiler_fence(args, AtomicFenceOp::SeqCst)?,
858 "atomic_xchg" => this.atomic_exchange(args, dest, AtomicRwOp::SeqCst)?,
859 "atomic_xchg_acq" => this.atomic_exchange(args, dest, AtomicRwOp::Acquire)?,
860 "atomic_xchg_rel" => this.atomic_exchange(args, dest, AtomicRwOp::Release)?,
861 "atomic_xchg_acqrel" => this.atomic_exchange(args, dest, AtomicRwOp::AcqRel)?,
862 "atomic_xchg_relaxed" => this.atomic_exchange(args, dest, AtomicRwOp::Relaxed)?,
866 this.atomic_compare_exchange(args, dest, AtomicRwOp::SeqCst, AtomicReadOp::SeqCst)?,
868 "atomic_cxchg_acq" =>
869 this.atomic_compare_exchange(args, dest, AtomicRwOp::Acquire, AtomicReadOp::Acquire)?,
871 "atomic_cxchg_rel" =>
872 this.atomic_compare_exchange(args, dest, AtomicRwOp::Release, AtomicReadOp::Relaxed)?,
874 "atomic_cxchg_acqrel" =>
875 this.atomic_compare_exchange(args, dest, AtomicRwOp::AcqRel, AtomicReadOp::Acquire)?,
877 "atomic_cxchg_relaxed" =>
878 this.atomic_compare_exchange(args, dest, AtomicRwOp::Relaxed, AtomicReadOp::Relaxed)?,
880 "atomic_cxchg_acq_failrelaxed" =>
881 this.atomic_compare_exchange(args, dest, AtomicRwOp::Acquire, AtomicReadOp::Relaxed)?,
883 "atomic_cxchg_acqrel_failrelaxed" =>
884 this.atomic_compare_exchange(args, dest, AtomicRwOp::AcqRel, AtomicReadOp::Relaxed)?,
886 "atomic_cxchg_failrelaxed" =>
887 this.atomic_compare_exchange(args, dest, AtomicRwOp::SeqCst, AtomicReadOp::Relaxed)?,
889 "atomic_cxchg_failacq" =>
890 this.atomic_compare_exchange(args, dest, AtomicRwOp::SeqCst, AtomicReadOp::Acquire)?,
893 "atomic_cxchgweak" =>
894 this.atomic_compare_exchange_weak(args, dest, AtomicRwOp::SeqCst, AtomicReadOp::SeqCst)?,
896 "atomic_cxchgweak_acq" =>
897 this.atomic_compare_exchange_weak(args, dest, AtomicRwOp::Acquire, AtomicReadOp::Acquire)?,
899 "atomic_cxchgweak_rel" =>
900 this.atomic_compare_exchange_weak(args, dest, AtomicRwOp::Release, AtomicReadOp::Relaxed)?,
902 "atomic_cxchgweak_acqrel" =>
903 this.atomic_compare_exchange_weak(args, dest, AtomicRwOp::AcqRel, AtomicReadOp::Acquire)?,
905 "atomic_cxchgweak_relaxed" =>
906 this.atomic_compare_exchange_weak(args, dest, AtomicRwOp::Relaxed, AtomicReadOp::Relaxed)?,
908 "atomic_cxchgweak_acq_failrelaxed" =>
909 this.atomic_compare_exchange_weak(args, dest, AtomicRwOp::Acquire, AtomicReadOp::Relaxed)?,
911 "atomic_cxchgweak_acqrel_failrelaxed" =>
912 this.atomic_compare_exchange_weak(args, dest, AtomicRwOp::AcqRel, AtomicReadOp::Relaxed)?,
914 "atomic_cxchgweak_failrelaxed" =>
915 this.atomic_compare_exchange_weak(args, dest, AtomicRwOp::SeqCst, AtomicReadOp::Relaxed)?,
917 "atomic_cxchgweak_failacq" =>
918 this.atomic_compare_exchange_weak(args, dest, AtomicRwOp::SeqCst, AtomicReadOp::Acquire)?,
922 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitOr, false), AtomicRwOp::SeqCst)?,
925 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitOr, false), AtomicRwOp::Acquire)?,
928 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitOr, false), AtomicRwOp::Release)?,
930 "atomic_or_acqrel" =>
931 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitOr, false), AtomicRwOp::AcqRel)?,
933 "atomic_or_relaxed" =>
934 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitOr, false), AtomicRwOp::Relaxed)?,
937 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitXor, false), AtomicRwOp::SeqCst)?,
940 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitXor, false), AtomicRwOp::Acquire)?,
943 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitXor, false), AtomicRwOp::Release)?,
945 "atomic_xor_acqrel" =>
946 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitXor, false), AtomicRwOp::AcqRel)?,
948 "atomic_xor_relaxed" =>
949 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitXor, false), AtomicRwOp::Relaxed)?,
952 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitAnd, false), AtomicRwOp::SeqCst)?,
955 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitAnd, false), AtomicRwOp::Acquire)?,
958 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitAnd, false), AtomicRwOp::Release)?,
960 "atomic_and_acqrel" =>
961 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitAnd, false), AtomicRwOp::AcqRel)?,
963 "atomic_and_relaxed" =>
964 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitAnd, false), AtomicRwOp::Relaxed)?,
967 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitAnd, true), AtomicRwOp::SeqCst)?,
970 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitAnd, true), AtomicRwOp::Acquire)?,
973 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitAnd, true), AtomicRwOp::Release)?,
975 "atomic_nand_acqrel" =>
976 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitAnd, true), AtomicRwOp::AcqRel)?,
978 "atomic_nand_relaxed" =>
979 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitAnd, true), AtomicRwOp::Relaxed)?,
982 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::Add, false), AtomicRwOp::SeqCst)?,
985 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::Add, false), AtomicRwOp::Acquire)?,
988 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::Add, false), AtomicRwOp::Release)?,
990 "atomic_xadd_acqrel" =>
991 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::Add, false), AtomicRwOp::AcqRel)?,
993 "atomic_xadd_relaxed" =>
994 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::Add, false), AtomicRwOp::Relaxed)?,
997 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::Sub, false), AtomicRwOp::SeqCst)?,
1000 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::Sub, false), AtomicRwOp::Acquire)?,
1002 "atomic_xsub_rel" =>
1003 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::Sub, false), AtomicRwOp::Release)?,
1005 "atomic_xsub_acqrel" =>
1006 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::Sub, false), AtomicRwOp::AcqRel)?,
1008 "atomic_xsub_relaxed" =>
1009 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::Sub, false), AtomicRwOp::Relaxed)?,
1010 "atomic_min" => this.atomic_op(args, dest, AtomicOp::Min, AtomicRwOp::SeqCst)?,
1011 "atomic_min_acq" => this.atomic_op(args, dest, AtomicOp::Min, AtomicRwOp::Acquire)?,
1012 "atomic_min_rel" => this.atomic_op(args, dest, AtomicOp::Min, AtomicRwOp::Release)?,
1013 "atomic_min_acqrel" => this.atomic_op(args, dest, AtomicOp::Min, AtomicRwOp::AcqRel)?,
1014 "atomic_min_relaxed" =>
1015 this.atomic_op(args, dest, AtomicOp::Min, AtomicRwOp::Relaxed)?,
1016 "atomic_max" => this.atomic_op(args, dest, AtomicOp::Max, AtomicRwOp::SeqCst)?,
1017 "atomic_max_acq" => this.atomic_op(args, dest, AtomicOp::Max, AtomicRwOp::Acquire)?,
1018 "atomic_max_rel" => this.atomic_op(args, dest, AtomicOp::Max, AtomicRwOp::Release)?,
1019 "atomic_max_acqrel" => this.atomic_op(args, dest, AtomicOp::Max, AtomicRwOp::AcqRel)?,
1020 "atomic_max_relaxed" =>
1021 this.atomic_op(args, dest, AtomicOp::Max, AtomicRwOp::Relaxed)?,
1022 "atomic_umin" => this.atomic_op(args, dest, AtomicOp::Min, AtomicRwOp::SeqCst)?,
1023 "atomic_umin_acq" => this.atomic_op(args, dest, AtomicOp::Min, AtomicRwOp::Acquire)?,
1024 "atomic_umin_rel" => this.atomic_op(args, dest, AtomicOp::Min, AtomicRwOp::Release)?,
1025 "atomic_umin_acqrel" =>
1026 this.atomic_op(args, dest, AtomicOp::Min, AtomicRwOp::AcqRel)?,
1027 "atomic_umin_relaxed" =>
1028 this.atomic_op(args, dest, AtomicOp::Min, AtomicRwOp::Relaxed)?,
1029 "atomic_umax" => this.atomic_op(args, dest, AtomicOp::Max, AtomicRwOp::SeqCst)?,
1030 "atomic_umax_acq" => this.atomic_op(args, dest, AtomicOp::Max, AtomicRwOp::Acquire)?,
1031 "atomic_umax_rel" => this.atomic_op(args, dest, AtomicOp::Max, AtomicRwOp::Release)?,
1032 "atomic_umax_acqrel" =>
1033 this.atomic_op(args, dest, AtomicOp::Max, AtomicRwOp::AcqRel)?,
1034 "atomic_umax_relaxed" =>
1035 this.atomic_op(args, dest, AtomicOp::Max, AtomicRwOp::Relaxed)?,
1039 let &[ref num, ref denom] = check_arg_count(args)?;
1040 this.exact_div(&this.read_immediate(num)?, &this.read_immediate(denom)?, dest)?;
1043 "try" => return this.handle_try(args, dest, ret),
1046 let &[] = check_arg_count(args)?;
1047 // normally this would raise a SIGTRAP, which aborts if no debugger is connected
1048 throw_machine_stop!(TerminationInfo::Abort("Trace/breakpoint trap".to_string()))
1051 name => throw_unsup_format!("unimplemented intrinsic: {}", name),
1054 trace!("{:?}", this.dump_place(**dest));
1055 this.go_to_block(ret);
1061 args: &[OpTy<'tcx, Tag>],
1062 dest: &PlaceTy<'tcx, Tag>,
1063 atomic: AtomicReadOp,
1064 ) -> InterpResult<'tcx> {
1065 let this = self.eval_context_mut();
1067 let &[ref place] = check_arg_count(args)?;
1068 let place = this.deref_operand(place)?;
1070 // make sure it fits into a scalar; otherwise it cannot be atomic
1071 let val = this.read_scalar_atomic(&place, atomic)?;
1073 // Check alignment requirements. Atomics must always be aligned to their size,
1074 // even if the type they wrap would be less aligned (e.g. AtomicU64 on 32bit must
1076 let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
1077 this.memory.check_ptr_access_align(
1081 CheckInAllocMsg::MemoryAccessTest,
1083 // Perform regular access.
1084 this.write_scalar(val, dest)?;
1090 args: &[OpTy<'tcx, Tag>],
1091 atomic: AtomicWriteOp,
1092 ) -> InterpResult<'tcx> {
1093 let this = self.eval_context_mut();
1095 let &[ref place, ref val] = check_arg_count(args)?;
1096 let place = this.deref_operand(place)?;
1097 let val = this.read_scalar(val)?; // make sure it fits into a scalar; otherwise it cannot be atomic
1099 // Check alignment requirements. Atomics must always be aligned to their size,
1100 // even if the type they wrap would be less aligned (e.g. AtomicU64 on 32bit must
1102 let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
1103 this.memory.check_ptr_access_align(
1107 CheckInAllocMsg::MemoryAccessTest,
1110 // Perform atomic store
1111 this.write_scalar_atomic(val, &place, atomic)?;
1117 args: &[OpTy<'tcx, Tag>],
1118 atomic: AtomicFenceOp,
1119 ) -> InterpResult<'tcx> {
1120 let &[] = check_arg_count(args)?;
1122 //FIXME: compiler fences are currently ignored
1128 args: &[OpTy<'tcx, Tag>],
1129 atomic: AtomicFenceOp,
1130 ) -> InterpResult<'tcx> {
1131 let this = self.eval_context_mut();
1132 let &[] = check_arg_count(args)?;
1133 this.validate_atomic_fence(atomic)?;
1139 args: &[OpTy<'tcx, Tag>],
1140 dest: &PlaceTy<'tcx, Tag>,
1141 atomic_op: AtomicOp,
1143 ) -> InterpResult<'tcx> {
1144 let this = self.eval_context_mut();
1146 let &[ref place, ref rhs] = check_arg_count(args)?;
1147 let place = this.deref_operand(place)?;
1149 if !place.layout.ty.is_integral() {
1150 bug!("Atomic arithmetic operations only work on integer types");
1152 let rhs = this.read_immediate(rhs)?;
1154 // Check alignment requirements. Atomics must always be aligned to their size,
1155 // even if the type they wrap would be less aligned (e.g. AtomicU64 on 32bit must
1157 let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
1158 this.memory.check_ptr_access_align(
1162 CheckInAllocMsg::MemoryAccessTest,
1167 let old = this.atomic_min_max_scalar(&place, rhs, true, atomic)?;
1168 this.write_immediate(*old, &dest)?; // old value is returned
1172 let old = this.atomic_min_max_scalar(&place, rhs, false, atomic)?;
1173 this.write_immediate(*old, &dest)?; // old value is returned
1176 AtomicOp::MirOp(op, neg) => {
1177 let old = this.atomic_op_immediate(&place, &rhs, op, neg, atomic)?;
1178 this.write_immediate(*old, dest)?; // old value is returned
1186 args: &[OpTy<'tcx, Tag>],
1187 dest: &PlaceTy<'tcx, Tag>,
1189 ) -> InterpResult<'tcx> {
1190 let this = self.eval_context_mut();
1192 let &[ref place, ref new] = check_arg_count(args)?;
1193 let place = this.deref_operand(place)?;
1194 let new = this.read_scalar(new)?;
1196 // Check alignment requirements. Atomics must always be aligned to their size,
1197 // even if the type they wrap would be less aligned (e.g. AtomicU64 on 32bit must
1199 let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
1200 this.memory.check_ptr_access_align(
1204 CheckInAllocMsg::MemoryAccessTest,
1207 let old = this.atomic_exchange_scalar(&place, new, atomic)?;
1208 this.write_scalar(old, dest)?; // old value is returned
// Shared implementation behind the `atomic_cxchg*` and `atomic_cxchgweak*` intrinsics:
// a compare-and-exchange on an atomic place that writes the old value to `dest`.
// `success` is the read-write ordering used when the exchange succeeds;
// `can_fail_spuriously` distinguishes weak (true) from strong (false) CAS.
// NOTE(review): this listing is elided — the failure-ordering parameter line and
// several argument lines of the two big calls below are not visible here.
1212 fn atomic_compare_exchange_impl(
1214 args: &[OpTy<'tcx, Tag>],
1215 dest: &PlaceTy<'tcx, Tag>,
1216 success: AtomicRwOp,
1218 can_fail_spuriously: bool,
1219 ) -> InterpResult<'tcx> {
1220 let this = self.eval_context_mut();
// Exactly three intrinsic arguments: pointer, expected old value, replacement value.
1222 let &[ref place, ref expect_old, ref new] = check_arg_count(args)?;
1223 let place = this.deref_operand(place)?;
1224 let expect_old = this.read_immediate(expect_old)?; // read as immediate for the sake of `binary_op()`
1225 let new = this.read_scalar(new)?;
1227 // Check alignment requirements. Atomics must always be aligned to their size,
1228 // even if the type they wrap would be less aligned (e.g. AtomicU64 on 32bit must
// The atomic's required alignment is derived from its size, not its type's ABI alignment.
1230 let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
1231 this.memory.check_ptr_access_align(
1235 CheckInAllocMsg::MemoryAccessTest,
// Delegate the actual CAS (including data-race tracking) to the machine's atomic support.
1238 let old = this.atomic_compare_exchange_scalar(
1244 can_fail_spuriously,
1247 // Return old value.
1248 this.write_immediate(old, dest)?;
// Strong compare-and-exchange: forwards to the shared impl with
// `can_fail_spuriously = false`, i.e. the exchange only fails when the
// comparison actually fails.
// NOTE(review): the failure-ordering parameter line (`fail`) is elided from this
// listing but is clearly in scope, as it is passed through below.
1252 fn atomic_compare_exchange(
1254 args: &[OpTy<'tcx, Tag>],
1255 dest: &PlaceTy<'tcx, Tag>,
1256 success: AtomicRwOp,
1258 ) -> InterpResult<'tcx> {
1259 self.atomic_compare_exchange_impl(args, dest, success, fail, false)
// Weak compare-and-exchange: forwards to the shared impl with
// `can_fail_spuriously = true`, so the exchange is allowed to fail even when
// the comparison succeeds (models `compare_exchange_weak`).
// NOTE(review): the failure-ordering parameter line (`fail`) is elided from this
// listing but is clearly in scope, as it is passed through below.
1262 fn atomic_compare_exchange_weak(
1264 args: &[OpTy<'tcx, Tag>],
1265 dest: &PlaceTy<'tcx, Tag>,
1266 success: AtomicRwOp,
1268 ) -> InterpResult<'tcx> {
1269 self.atomic_compare_exchange_impl(args, dest, success, fail, true)
// Implements the `float_to_int_unchecked` intrinsic for a float value `f` of
// apfloat type `F`: truncate toward zero, then convert to the integer type
// `dest_ty`, raising UB if the value is not exactly representable there.
// Returns the converted value as a `Scalar`.
// NOTE(review): this listing is elided — the `ty::Uint(..)`/`ty::Int(..)` match-arm
// headers and the `throw_ub_format!` frames around the error messages are not visible.
1272 fn float_to_int_unchecked<F>(
1275 dest_ty: ty::Ty<'tcx>,
1276 ) -> InterpResult<'tcx, Scalar<Tag>>
1278 F: Float + Into<Scalar<Tag>>,
1280 let this = self.eval_context_ref();
1282 // Step 1: cut off the fractional part of `f`. The result of this is
1283 // guaranteed to be precisely representable in IEEE floats.
1284 let f = f.round_to_integral(Round::TowardZero).value;
1286 // Step 2: Cast the truncated float to the target integer type and see if we lose any information in this step.
1287 Ok(match dest_ty.kind() {
// Unsigned target: convert via `to_u128` at the target's bit width.
1290 let size = Integer::from_uint_ty(this, *t).size();
1291 let res = f.to_u128(size.bits_usize());
1292 if res.status.is_empty() {
1293 // No status flags means there was no further rounding or other loss of precision.
1294 Scalar::from_uint(res.value, size)
1296 // `f` was not representable in this integer type.
1298 "`float_to_int_unchecked` intrinsic called on {} which cannot be represented in target type `{:?}`",
// Signed target: same scheme via `to_i128`.
1306 let size = Integer::from_int_ty(this, *t).size();
1307 let res = f.to_i128(size.bits_usize());
1308 if res.status.is_empty() {
1309 // No status flags means there was no further rounding or other loss of precision.
1310 Scalar::from_int(res.value, size)
1312 // `f` was not representable in this integer type.
1314 "`float_to_int_unchecked` intrinsic called on {} which cannot be represented in target type `{:?}`",
// Any non-integer destination type is a compiler bug, not user error.
1321 _ => bug!("`float_to_int_unchecked` called with non-int output type {:?}", dest_ty),
1327 left: &ImmTy<'tcx, Tag>,
1328 right: &ImmTy<'tcx, Tag>,
1329 ) -> InterpResult<'tcx, Scalar<Tag>> {
1330 assert_eq!(left.layout.ty, right.layout.ty);
1331 let ty::Float(float_ty) = left.layout.ty.kind() else {
1332 bug!("fmax operand is not a float")
1334 let left = left.to_scalar()?;
1335 let right = right.to_scalar()?;
1337 FloatTy::F32 => Scalar::from_f32(left.to_f32()?.max(right.to_f32()?)),
1338 FloatTy::F64 => Scalar::from_f64(left.to_f64()?.max(right.to_f64()?)),
1343 left: &ImmTy<'tcx, Tag>,
1344 right: &ImmTy<'tcx, Tag>,
1345 ) -> InterpResult<'tcx, Scalar<Tag>> {
1346 assert_eq!(left.layout.ty, right.layout.ty);
1347 let ty::Float(float_ty) = left.layout.ty.kind() else {
1348 bug!("fmin operand is not a float")
1350 let left = left.to_scalar()?;
1351 let right = right.to_scalar()?;
1353 FloatTy::F32 => Scalar::from_f32(left.to_f32()?.min(right.to_f32()?)),
1354 FloatTy::F64 => Scalar::from_f64(left.to_f64()?.min(right.to_f64()?)),
// Encodes a `bool` as a SIMD mask element of the given `size`:
// all-1-bits (-1 sign-extended to `size`) for `true`, all-0-bits for `false`.
// Inverse of `simd_element_to_bool` below.
1358 fn bool_to_simd_element(b: bool, size: Size) -> Scalar<Tag> {
1359 // SIMD uses all-1 as pattern for "true"
1360 let val = if b { -1 } else { 0 };
1361 Scalar::from_int(val, size)
// Decodes a SIMD mask element back into a `bool`: all-0-bits is `false`,
// all-1-bits is `true`, and any other bit pattern is UB (reported below).
// NOTE(review): the `0 => …` / `-1 => …` match arms are elided from this listing;
// only the catch-all UB arm is visible.
1364 fn simd_element_to_bool<'tcx>(elem: ImmTy<'tcx, Tag>) -> InterpResult<'tcx, bool> {
// Read the element as a signed integer of its own width so -1 means all-ones.
1365 let val = elem.to_scalar()?.to_int(elem.layout.size)?;
1369 _ => throw_ub_format!("each element of a SIMD mask must be all-0-bits or all-1-bits"),
1373 fn simd_bitmask_index(idx: u64, len: u64, endianess: Endian) -> u64 {
1376 Endian::Little => idx,
1377 Endian::Big => len.max(8) - 1 - idx, // reverse order of bits