5 use rustc_apfloat::{Float, Round};
6 use rustc_middle::ty::layout::{HasParamEnv, IntegerExt, LayoutOf};
7 use rustc_middle::{mir, mir::BinOp, ty, ty::FloatTy};
8 use rustc_target::abi::{Align, Endian, HasDataLayout, Integer, Size};
11 use helpers::check_arg_count;
14 MirOp(mir::BinOp, bool),
// Empty blanket impl: `MiriEvalContext` picks up every method from the trait's
// default implementations below — no per-type overrides are needed here.
19 impl<'mir, 'tcx: 'mir> EvalContextExt<'mir, 'tcx> for crate::MiriEvalContext<'mir, 'tcx> {}
20 pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx> {
23 instance: ty::Instance<'tcx>,
24 args: &[OpTy<'tcx, Tag>],
25 dest: &PlaceTy<'tcx, Tag>,
26 ret: Option<mir::BasicBlock>,
27 _unwind: StackPopUnwind,
28 ) -> InterpResult<'tcx> {
29 let this = self.eval_context_mut();
31 if this.emulate_intrinsic(instance, args, dest, ret)? {
35 // All supported intrinsics have a return place.
36 let intrinsic_name = this.tcx.item_name(instance.def_id());
37 let intrinsic_name = intrinsic_name.as_str();
39 None => throw_unsup_format!("unimplemented (diverging) intrinsic: {}", intrinsic_name),
43 // Then handle terminating intrinsics.
44 match intrinsic_name {
45 // Miri overwriting CTFE intrinsics.
46 "ptr_guaranteed_eq" => {
47 let [left, right] = check_arg_count(args)?;
48 let left = this.read_immediate(left)?;
49 let right = this.read_immediate(right)?;
50 this.binop_ignore_overflow(mir::BinOp::Eq, &left, &right, dest)?;
52 "ptr_guaranteed_ne" => {
53 let [left, right] = check_arg_count(args)?;
54 let left = this.read_immediate(left)?;
55 let right = this.read_immediate(right)?;
56 this.binop_ignore_overflow(mir::BinOp::Ne, &left, &right, dest)?;
59 // For now, for compatibility with the run-time implementation of this, we just return null.
60 // See <https://github.com/rust-lang/rust/issues/93935>.
61 this.write_null(dest)?;
63 "const_deallocate" => {
67 // Raw memory accesses
69 let [place] = check_arg_count(args)?;
70 let place = this.deref_operand(place)?;
71 this.copy_op(&place.into(), dest)?;
74 let [place, dest] = check_arg_count(args)?;
75 let place = this.deref_operand(place)?;
76 this.copy_op(dest, &place.into())?;
79 "write_bytes" | "volatile_set_memory" => {
80 let [ptr, val_byte, count] = check_arg_count(args)?;
81 let ty = instance.substs.type_at(0);
82 let ty_layout = this.layout_of(ty)?;
83 let val_byte = this.read_scalar(val_byte)?.to_u8()?;
84 let ptr = this.read_pointer(ptr)?;
85 let count = this.read_scalar(count)?.to_machine_usize(this)?;
86 // `checked_mul` enforces a too small bound (the correct one would probably be machine_isize_max),
87 // but no actual allocation can be big enough for the difference to be noticeable.
88 let byte_count = ty_layout.size.checked_mul(count, this).ok_or_else(|| {
89 err_ub_format!("overflow computing total size of `{}`", intrinsic_name)
93 iter::repeat(val_byte).take(byte_count.bytes() as usize),
97 // Floating-point operations
99 let [f] = check_arg_count(args)?;
100 let f = this.read_scalar(f)?.to_f32()?;
101 // Can be implemented in soft-floats.
102 this.write_scalar(Scalar::from_f32(f.abs()), dest)?;
105 let [f] = check_arg_count(args)?;
106 let f = this.read_scalar(f)?.to_f64()?;
107 // Can be implemented in soft-floats.
108 this.write_scalar(Scalar::from_f64(f.abs()), dest)?;
124 let [f] = check_arg_count(args)?;
125 // FIXME: Using host floats.
126 let f = f32::from_bits(this.read_scalar(f)?.to_u32()?);
127 let f = match intrinsic_name {
130 "sqrtf32" => f.sqrt(),
132 "exp2f32" => f.exp2(),
134 "log10f32" => f.log10(),
135 "log2f32" => f.log2(),
136 "floorf32" => f.floor(),
137 "ceilf32" => f.ceil(),
138 "truncf32" => f.trunc(),
139 "roundf32" => f.round(),
142 this.write_scalar(Scalar::from_u32(f.to_bits()), dest)?;
159 let [f] = check_arg_count(args)?;
160 // FIXME: Using host floats.
161 let f = f64::from_bits(this.read_scalar(f)?.to_u64()?);
162 let f = match intrinsic_name {
165 "sqrtf64" => f.sqrt(),
167 "exp2f64" => f.exp2(),
169 "log10f64" => f.log10(),
170 "log2f64" => f.log2(),
171 "floorf64" => f.floor(),
172 "ceilf64" => f.ceil(),
173 "truncf64" => f.trunc(),
174 "roundf64" => f.round(),
177 this.write_scalar(Scalar::from_u64(f.to_bits()), dest)?;
187 let [a, b] = check_arg_count(args)?;
188 let a = this.read_immediate(a)?;
189 let b = this.read_immediate(b)?;
190 let op = match intrinsic_name {
191 "fadd_fast" => mir::BinOp::Add,
192 "fsub_fast" => mir::BinOp::Sub,
193 "fmul_fast" => mir::BinOp::Mul,
194 "fdiv_fast" => mir::BinOp::Div,
195 "frem_fast" => mir::BinOp::Rem,
198 let float_finite = |x: ImmTy<'tcx, _>| -> InterpResult<'tcx, bool> {
199 Ok(match x.layout.ty.kind() {
200 ty::Float(FloatTy::F32) => x.to_scalar()?.to_f32()?.is_finite(),
201 ty::Float(FloatTy::F64) => x.to_scalar()?.to_f64()?.is_finite(),
203 "`{}` called with non-float input type {:?}",
209 match (float_finite(a)?, float_finite(b)?) {
210 (false, false) => throw_ub_format!(
211 "`{}` intrinsic called with non-finite value as both parameters",
214 (false, _) => throw_ub_format!(
215 "`{}` intrinsic called with non-finite value as first parameter",
218 (_, false) => throw_ub_format!(
219 "`{}` intrinsic called with non-finite value as second parameter",
224 this.binop_ignore_overflow(op, &a, &b, dest)?;
232 let [a, b] = check_arg_count(args)?;
233 let a = this.read_scalar(a)?.to_f32()?;
234 let b = this.read_scalar(b)?.to_f32()?;
235 let res = match intrinsic_name {
236 "minnumf32" => a.min(b),
237 "maxnumf32" => a.max(b),
238 "copysignf32" => a.copy_sign(b),
241 this.write_scalar(Scalar::from_f32(res), dest)?;
249 let [a, b] = check_arg_count(args)?;
250 let a = this.read_scalar(a)?.to_f64()?;
251 let b = this.read_scalar(b)?.to_f64()?;
252 let res = match intrinsic_name {
253 "minnumf64" => a.min(b),
254 "maxnumf64" => a.max(b),
255 "copysignf64" => a.copy_sign(b),
258 this.write_scalar(Scalar::from_f64(res), dest)?;
262 let [f, f2] = check_arg_count(args)?;
263 // FIXME: Using host floats.
264 let f = f32::from_bits(this.read_scalar(f)?.to_u32()?);
265 let f2 = f32::from_bits(this.read_scalar(f2)?.to_u32()?);
266 this.write_scalar(Scalar::from_u32(f.powf(f2).to_bits()), dest)?;
270 let [f, f2] = check_arg_count(args)?;
271 // FIXME: Using host floats.
272 let f = f64::from_bits(this.read_scalar(f)?.to_u64()?);
273 let f2 = f64::from_bits(this.read_scalar(f2)?.to_u64()?);
274 this.write_scalar(Scalar::from_u64(f.powf(f2).to_bits()), dest)?;
278 let [a, b, c] = check_arg_count(args)?;
279 let a = this.read_scalar(a)?.to_f32()?;
280 let b = this.read_scalar(b)?.to_f32()?;
281 let c = this.read_scalar(c)?.to_f32()?;
282 let res = a.mul_add(b, c).value;
283 this.write_scalar(Scalar::from_f32(res), dest)?;
287 let [a, b, c] = check_arg_count(args)?;
288 let a = this.read_scalar(a)?.to_f64()?;
289 let b = this.read_scalar(b)?.to_f64()?;
290 let c = this.read_scalar(c)?.to_f64()?;
291 let res = a.mul_add(b, c).value;
292 this.write_scalar(Scalar::from_f64(res), dest)?;
296 let [f, i] = check_arg_count(args)?;
297 // FIXME: Using host floats.
298 let f = f32::from_bits(this.read_scalar(f)?.to_u32()?);
299 let i = this.read_scalar(i)?.to_i32()?;
300 this.write_scalar(Scalar::from_u32(f.powi(i).to_bits()), dest)?;
304 let [f, i] = check_arg_count(args)?;
305 // FIXME: Using host floats.
306 let f = f64::from_bits(this.read_scalar(f)?.to_u64()?);
307 let i = this.read_scalar(i)?.to_i32()?;
308 this.write_scalar(Scalar::from_u64(f.powi(i).to_bits()), dest)?;
311 "float_to_int_unchecked" => {
312 let [val] = check_arg_count(args)?;
313 let val = this.read_immediate(val)?;
315 let res = match val.layout.ty.kind() {
316 ty::Float(FloatTy::F32) =>
317 this.float_to_int_unchecked(val.to_scalar()?.to_f32()?, dest.layout.ty)?,
318 ty::Float(FloatTy::F64) =>
319 this.float_to_int_unchecked(val.to_scalar()?.to_f64()?, dest.layout.ty)?,
322 "`float_to_int_unchecked` called with non-float input type {:?}",
327 this.write_scalar(res, dest)?;
339 let [op] = check_arg_count(args)?;
340 let (op, op_len) = this.operand_to_simd(op)?;
341 let (dest, dest_len) = this.place_to_simd(dest)?;
343 assert_eq!(dest_len, op_len);
345 #[derive(Copy, Clone)]
353 #[derive(Copy, Clone)]
359 let which = match intrinsic_name {
360 "simd_neg" => Op::MirOp(mir::UnOp::Neg),
361 "simd_fabs" => Op::Abs,
362 "simd_ceil" => Op::HostOp(HostFloatOp::Ceil),
363 "simd_floor" => Op::HostOp(HostFloatOp::Floor),
364 "simd_round" => Op::HostOp(HostFloatOp::Round),
365 "simd_trunc" => Op::HostOp(HostFloatOp::Trunc),
366 "simd_fsqrt" => Op::HostOp(HostFloatOp::Sqrt),
370 for i in 0..dest_len {
371 let op = this.read_immediate(&this.mplace_index(&op, i)?.into())?;
372 let dest = this.mplace_index(&dest, i)?;
373 let val = match which {
374 Op::MirOp(mir_op) => this.unary_op(mir_op, &op)?.to_scalar()?,
376 // Works for f32 and f64.
377 let ty::Float(float_ty) = op.layout.ty.kind() else {
378 bug!("{} operand is not a float", intrinsic_name)
380 let op = op.to_scalar()?;
382 FloatTy::F32 => Scalar::from_f32(op.to_f32()?.abs()),
383 FloatTy::F64 => Scalar::from_f64(op.to_f64()?.abs()),
386 Op::HostOp(host_op) => {
387 let ty::Float(float_ty) = op.layout.ty.kind() else {
388 bug!("{} operand is not a float", intrinsic_name)
390 // FIXME using host floats
393 let f = f32::from_bits(op.to_scalar()?.to_u32()?);
394 let res = match host_op {
395 HostFloatOp::Ceil => f.ceil(),
396 HostFloatOp::Floor => f.floor(),
397 HostFloatOp::Round => f.round(),
398 HostFloatOp::Trunc => f.trunc(),
399 HostFloatOp::Sqrt => f.sqrt(),
401 Scalar::from_u32(res.to_bits())
404 let f = f64::from_bits(op.to_scalar()?.to_u64()?);
405 let res = match host_op {
406 HostFloatOp::Ceil => f.ceil(),
407 HostFloatOp::Floor => f.floor(),
408 HostFloatOp::Round => f.round(),
409 HostFloatOp::Trunc => f.trunc(),
410 HostFloatOp::Sqrt => f.sqrt(),
412 Scalar::from_u64(res.to_bits())
418 this.write_scalar(val, &dest.into())?;
440 | "simd_saturating_add"
441 | "simd_saturating_sub"
442 | "simd_arith_offset" => {
445 let [left, right] = check_arg_count(args)?;
446 let (left, left_len) = this.operand_to_simd(left)?;
447 let (right, right_len) = this.operand_to_simd(right)?;
448 let (dest, dest_len) = this.place_to_simd(dest)?;
450 assert_eq!(dest_len, left_len);
451 assert_eq!(dest_len, right_len);
460 let which = match intrinsic_name {
461 "simd_add" => Op::MirOp(BinOp::Add),
462 "simd_sub" => Op::MirOp(BinOp::Sub),
463 "simd_mul" => Op::MirOp(BinOp::Mul),
464 "simd_div" => Op::MirOp(BinOp::Div),
465 "simd_rem" => Op::MirOp(BinOp::Rem),
466 "simd_shl" => Op::MirOp(BinOp::Shl),
467 "simd_shr" => Op::MirOp(BinOp::Shr),
468 "simd_and" => Op::MirOp(BinOp::BitAnd),
469 "simd_or" => Op::MirOp(BinOp::BitOr),
470 "simd_xor" => Op::MirOp(BinOp::BitXor),
471 "simd_eq" => Op::MirOp(BinOp::Eq),
472 "simd_ne" => Op::MirOp(BinOp::Ne),
473 "simd_lt" => Op::MirOp(BinOp::Lt),
474 "simd_le" => Op::MirOp(BinOp::Le),
475 "simd_gt" => Op::MirOp(BinOp::Gt),
476 "simd_ge" => Op::MirOp(BinOp::Ge),
477 "simd_fmax" => Op::FMax,
478 "simd_fmin" => Op::FMin,
479 "simd_saturating_add" => Op::SaturatingOp(BinOp::Add),
480 "simd_saturating_sub" => Op::SaturatingOp(BinOp::Sub),
481 "simd_arith_offset" => Op::WrappingOffset,
485 for i in 0..dest_len {
486 let left = this.read_immediate(&this.mplace_index(&left, i)?.into())?;
487 let right = this.read_immediate(&this.mplace_index(&right, i)?.into())?;
488 let dest = this.mplace_index(&dest, i)?;
489 let val = match which {
490 Op::MirOp(mir_op) => {
491 let (val, overflowed, ty) = this.overflowing_binary_op(mir_op, &left, &right)?;
492 if matches!(mir_op, BinOp::Shl | BinOp::Shr) {
493 // Shifts have extra UB as SIMD operations that the MIR binop does not have.
494 // See <https://github.com/rust-lang/rust/issues/91237>.
496 let r_val = right.to_scalar()?.to_bits(right.layout.size)?;
497 throw_ub_format!("overflowing shift by {} in `{}` in SIMD lane {}", r_val, intrinsic_name, i);
500 if matches!(mir_op, BinOp::Eq | BinOp::Ne | BinOp::Lt | BinOp::Le | BinOp::Gt | BinOp::Ge) {
501 // Special handling for boolean-returning operations
502 assert_eq!(ty, this.tcx.types.bool);
503 let val = val.to_bool().unwrap();
504 bool_to_simd_element(val, dest.layout.size)
506 assert_ne!(ty, this.tcx.types.bool);
507 assert_eq!(ty, dest.layout.ty);
511 Op::SaturatingOp(mir_op) => {
512 this.saturating_arith(mir_op, &left, &right)?
514 Op::WrappingOffset => {
515 let ptr = this.scalar_to_ptr(left.to_scalar()?)?;
516 let offset_count = right.to_scalar()?.to_machine_isize(this)?;
517 let pointee_ty = left.layout.ty.builtin_deref(true).unwrap().ty;
519 let pointee_size = i64::try_from(this.layout_of(pointee_ty)?.size.bytes()).unwrap();
520 let offset_bytes = offset_count.wrapping_mul(pointee_size);
521 let offset_ptr = ptr.wrapping_signed_offset(offset_bytes, this);
522 Scalar::from_maybe_pointer(offset_ptr, this)
525 fmax_op(&left, &right)?
528 fmin_op(&left, &right)?
531 this.write_scalar(val, &dest.into())?;
535 let [a, b, c] = check_arg_count(args)?;
536 let (a, a_len) = this.operand_to_simd(a)?;
537 let (b, b_len) = this.operand_to_simd(b)?;
538 let (c, c_len) = this.operand_to_simd(c)?;
539 let (dest, dest_len) = this.place_to_simd(dest)?;
541 assert_eq!(dest_len, a_len);
542 assert_eq!(dest_len, b_len);
543 assert_eq!(dest_len, c_len);
545 for i in 0..dest_len {
546 let a = this.read_immediate(&this.mplace_index(&a, i)?.into())?.to_scalar()?;
547 let b = this.read_immediate(&this.mplace_index(&b, i)?.into())?.to_scalar()?;
548 let c = this.read_immediate(&this.mplace_index(&c, i)?.into())?.to_scalar()?;
549 let dest = this.mplace_index(&dest, i)?;
551 // Works for f32 and f64.
552 let ty::Float(float_ty) = dest.layout.ty.kind() else {
553 bug!("{} operand is not a float", intrinsic_name)
555 let val = match float_ty {
557 Scalar::from_f32(a.to_f32()?.mul_add(b.to_f32()?, c.to_f32()?).value),
559 Scalar::from_f64(a.to_f64()?.mul_add(b.to_f64()?, c.to_f64()?).value),
561 this.write_scalar(val, &dest.into())?;
571 | "simd_reduce_min" => {
574 let [op] = check_arg_count(args)?;
575 let (op, op_len) = this.operand_to_simd(op)?;
578 |b| ImmTy::from_scalar(Scalar::from_bool(b), this.machine.layouts.bool);
586 let which = match intrinsic_name {
587 "simd_reduce_and" => Op::MirOp(BinOp::BitAnd),
588 "simd_reduce_or" => Op::MirOp(BinOp::BitOr),
589 "simd_reduce_xor" => Op::MirOp(BinOp::BitXor),
590 "simd_reduce_any" => Op::MirOpBool(BinOp::BitOr),
591 "simd_reduce_all" => Op::MirOpBool(BinOp::BitAnd),
592 "simd_reduce_max" => Op::Max,
593 "simd_reduce_min" => Op::Min,
597 // Initialize with first lane, then proceed with the rest.
598 let mut res = this.read_immediate(&this.mplace_index(&op, 0)?.into())?;
599 if matches!(which, Op::MirOpBool(_)) {
600 // Convert to `bool` scalar.
601 res = imm_from_bool(simd_element_to_bool(res)?);
604 let op = this.read_immediate(&this.mplace_index(&op, i)?.into())?;
606 Op::MirOp(mir_op) => {
607 this.binary_op(mir_op, &res, &op)?
609 Op::MirOpBool(mir_op) => {
610 let op = imm_from_bool(simd_element_to_bool(op)?);
611 this.binary_op(mir_op, &res, &op)?
614 if matches!(res.layout.ty.kind(), ty::Float(_)) {
615 ImmTy::from_scalar(fmax_op(&res, &op)?, res.layout)
617 // Just boring integers, so NaNs to worry about
618 if this.binary_op(BinOp::Ge, &res, &op)?.to_scalar()?.to_bool()? {
626 if matches!(res.layout.ty.kind(), ty::Float(_)) {
627 ImmTy::from_scalar(fmin_op(&res, &op)?, res.layout)
629 // Just boring integers, so NaNs to worry about
630 if this.binary_op(BinOp::Le, &res, &op)?.to_scalar()?.to_bool()? {
639 this.write_immediate(*res, dest)?;
642 | "simd_reduce_add_ordered"
643 | "simd_reduce_mul_ordered" => {
646 let [op, init] = check_arg_count(args)?;
647 let (op, op_len) = this.operand_to_simd(op)?;
648 let init = this.read_immediate(init)?;
650 let mir_op = match intrinsic_name {
651 "simd_reduce_add_ordered" => BinOp::Add,
652 "simd_reduce_mul_ordered" => BinOp::Mul,
658 let op = this.read_immediate(&this.mplace_index(&op, i)?.into())?;
659 res = this.binary_op(mir_op, &res, &op)?;
661 this.write_immediate(*res, dest)?;
664 let [mask, yes, no] = check_arg_count(args)?;
665 let (mask, mask_len) = this.operand_to_simd(mask)?;
666 let (yes, yes_len) = this.operand_to_simd(yes)?;
667 let (no, no_len) = this.operand_to_simd(no)?;
668 let (dest, dest_len) = this.place_to_simd(dest)?;
670 assert_eq!(dest_len, mask_len);
671 assert_eq!(dest_len, yes_len);
672 assert_eq!(dest_len, no_len);
674 for i in 0..dest_len {
675 let mask = this.read_immediate(&this.mplace_index(&mask, i)?.into())?;
676 let yes = this.read_immediate(&this.mplace_index(&yes, i)?.into())?;
677 let no = this.read_immediate(&this.mplace_index(&no, i)?.into())?;
678 let dest = this.mplace_index(&dest, i)?;
680 let val = if simd_element_to_bool(mask)? { yes } else { no };
681 this.write_immediate(*val, &dest.into())?;
684 "simd_select_bitmask" => {
685 let [mask, yes, no] = check_arg_count(args)?;
686 let (yes, yes_len) = this.operand_to_simd(yes)?;
687 let (no, no_len) = this.operand_to_simd(no)?;
688 let (dest, dest_len) = this.place_to_simd(dest)?;
689 let bitmask_len = dest_len.max(8);
691 assert!(mask.layout.ty.is_integral());
692 assert!(bitmask_len <= 64);
693 assert_eq!(bitmask_len, mask.layout.size.bits());
694 assert_eq!(dest_len, yes_len);
695 assert_eq!(dest_len, no_len);
700 .to_bits(mask.layout.size)?
703 for i in 0..dest_len {
705 mask & (1 << simd_bitmask_index(i, dest_len, this.data_layout().endian));
706 let yes = this.read_immediate(&this.mplace_index(&yes, i)?.into())?;
707 let no = this.read_immediate(&this.mplace_index(&no, i)?.into())?;
708 let dest = this.mplace_index(&dest, i)?;
710 let val = if mask != 0 { yes } else { no };
711 this.write_immediate(*val, &dest.into())?;
713 for i in dest_len..bitmask_len {
714 // If the mask is "padded", ensure that padding is all-zero.
715 let mask = mask & (1 << i);
718 "a SIMD bitmask less than 8 bits long must be filled with 0s for the remaining bits"
724 "simd_cast" | "simd_as" => {
725 let [op] = check_arg_count(args)?;
726 let (op, op_len) = this.operand_to_simd(op)?;
727 let (dest, dest_len) = this.place_to_simd(dest)?;
729 assert_eq!(dest_len, op_len);
731 let safe_cast = intrinsic_name == "simd_as";
733 for i in 0..dest_len {
734 let op = this.read_immediate(&this.mplace_index(&op, i)?.into())?;
735 let dest = this.mplace_index(&dest, i)?;
737 let val = match (op.layout.ty.kind(), dest.layout.ty.kind()) {
738 // Int-to-(int|float): always safe
739 (ty::Int(_) | ty::Uint(_), ty::Int(_) | ty::Uint(_) | ty::Float(_)) =>
740 this.misc_cast(&op, dest.layout.ty)?,
741 // Float-to-float: always safe
742 (ty::Float(_), ty::Float(_)) =>
743 this.misc_cast(&op, dest.layout.ty)?,
744 // Float-to-int in safe mode
745 (ty::Float(_), ty::Int(_) | ty::Uint(_)) if safe_cast =>
746 this.misc_cast(&op, dest.layout.ty)?,
747 // Float-to-int in unchecked mode
748 (ty::Float(FloatTy::F32), ty::Int(_) | ty::Uint(_)) if !safe_cast =>
749 this.float_to_int_unchecked(op.to_scalar()?.to_f32()?, dest.layout.ty)?.into(),
750 (ty::Float(FloatTy::F64), ty::Int(_) | ty::Uint(_)) if !safe_cast =>
751 this.float_to_int_unchecked(op.to_scalar()?.to_f64()?, dest.layout.ty)?.into(),
754 "Unsupported SIMD cast from element type {} to {}",
759 this.write_immediate(val, &dest.into())?;
763 let [left, right, index] = check_arg_count(args)?;
764 let (left, left_len) = this.operand_to_simd(left)?;
765 let (right, right_len) = this.operand_to_simd(right)?;
766 let (dest, dest_len) = this.place_to_simd(dest)?;
768 // `index` is an array, not a SIMD type
769 let ty::Array(_, index_len) = index.layout.ty.kind() else {
770 bug!("simd_shuffle index argument has non-array type {}", index.layout.ty)
772 let index_len = index_len.eval_usize(*this.tcx, this.param_env());
774 assert_eq!(left_len, right_len);
775 assert_eq!(index_len, dest_len);
777 for i in 0..dest_len {
778 let src_index: u64 = this
779 .read_immediate(&this.operand_index(index, i)?)?
783 let dest = this.mplace_index(&dest, i)?;
785 let val = if src_index < left_len {
786 this.read_immediate(&this.mplace_index(&left, src_index)?.into())?
787 } else if src_index < left_len.checked_add(right_len).unwrap() {
789 &this.mplace_index(&right, src_index - left_len)?.into(),
793 "simd_shuffle index {} is out of bounds for 2 vectors of size {}",
798 this.write_immediate(*val, &dest.into())?;
802 let [passthru, ptrs, mask] = check_arg_count(args)?;
803 let (passthru, passthru_len) = this.operand_to_simd(passthru)?;
804 let (ptrs, ptrs_len) = this.operand_to_simd(ptrs)?;
805 let (mask, mask_len) = this.operand_to_simd(mask)?;
806 let (dest, dest_len) = this.place_to_simd(dest)?;
808 assert_eq!(dest_len, passthru_len);
809 assert_eq!(dest_len, ptrs_len);
810 assert_eq!(dest_len, mask_len);
812 for i in 0..dest_len {
813 let passthru = this.read_immediate(&this.mplace_index(&passthru, i)?.into())?;
814 let ptr = this.read_immediate(&this.mplace_index(&ptrs, i)?.into())?;
815 let mask = this.read_immediate(&this.mplace_index(&mask, i)?.into())?;
816 let dest = this.mplace_index(&dest, i)?;
818 let val = if simd_element_to_bool(mask)? {
819 let place = this.deref_operand(&ptr.into())?;
820 this.read_immediate(&place.into())?
824 this.write_immediate(*val, &dest.into())?;
828 let [value, ptrs, mask] = check_arg_count(args)?;
829 let (value, value_len) = this.operand_to_simd(value)?;
830 let (ptrs, ptrs_len) = this.operand_to_simd(ptrs)?;
831 let (mask, mask_len) = this.operand_to_simd(mask)?;
833 assert_eq!(ptrs_len, value_len);
834 assert_eq!(ptrs_len, mask_len);
836 for i in 0..ptrs_len {
837 let value = this.read_immediate(&this.mplace_index(&value, i)?.into())?;
838 let ptr = this.read_immediate(&this.mplace_index(&ptrs, i)?.into())?;
839 let mask = this.read_immediate(&this.mplace_index(&mask, i)?.into())?;
841 if simd_element_to_bool(mask)? {
842 let place = this.deref_operand(&ptr.into())?;
843 this.write_immediate(*value, &place.into())?;
848 let [op] = check_arg_count(args)?;
849 let (op, op_len) = this.operand_to_simd(op)?;
850 let bitmask_len = op_len.max(8);
852 assert!(dest.layout.ty.is_integral());
853 assert!(bitmask_len <= 64);
854 assert_eq!(bitmask_len, dest.layout.size.bits());
858 let op = this.read_immediate(&this.mplace_index(&op, i)?.into())?;
859 if simd_element_to_bool(op)? {
860 res |= 1 << simd_bitmask_index(i, op_len, this.data_layout().endian);
863 this.write_int(res, dest)?;
867 "atomic_load_seqcst" => this.atomic_load(args, dest, AtomicReadOp::SeqCst)?,
868 "atomic_load_relaxed" => this.atomic_load(args, dest, AtomicReadOp::Relaxed)?,
869 "atomic_load_acquire" => this.atomic_load(args, dest, AtomicReadOp::Acquire)?,
871 "atomic_store_seqcst" => this.atomic_store(args, AtomicWriteOp::SeqCst)?,
872 "atomic_store_relaxed" => this.atomic_store(args, AtomicWriteOp::Relaxed)?,
873 "atomic_store_release" => this.atomic_store(args, AtomicWriteOp::Release)?,
875 "atomic_fence_acquire" => this.atomic_fence(args, AtomicFenceOp::Acquire)?,
876 "atomic_fence_release" => this.atomic_fence(args, AtomicFenceOp::Release)?,
877 "atomic_fence_acqrel" => this.atomic_fence(args, AtomicFenceOp::AcqRel)?,
878 "atomic_fence_seqcst" => this.atomic_fence(args, AtomicFenceOp::SeqCst)?,
880 "atomic_singlethreadfence_acquire" =>
881 this.compiler_fence(args, AtomicFenceOp::Acquire)?,
882 "atomic_singlethreadfence_release" =>
883 this.compiler_fence(args, AtomicFenceOp::Release)?,
884 "atomic_singlethreadfence_acqrel" =>
885 this.compiler_fence(args, AtomicFenceOp::AcqRel)?,
886 "atomic_singlethreadfence_seqcst" =>
887 this.compiler_fence(args, AtomicFenceOp::SeqCst)?,
889 "atomic_xchg_seqcst" => this.atomic_exchange(args, dest, AtomicRwOp::SeqCst)?,
890 "atomic_xchg_acquire" => this.atomic_exchange(args, dest, AtomicRwOp::Acquire)?,
891 "atomic_xchg_release" => this.atomic_exchange(args, dest, AtomicRwOp::Release)?,
892 "atomic_xchg_acqrel" => this.atomic_exchange(args, dest, AtomicRwOp::AcqRel)?,
893 "atomic_xchg_relaxed" => this.atomic_exchange(args, dest, AtomicRwOp::Relaxed)?,
896 "atomic_cxchg_seqcst_seqcst" =>
897 this.atomic_compare_exchange(args, dest, AtomicRwOp::SeqCst, AtomicReadOp::SeqCst)?,
899 "atomic_cxchg_acquire_acquire" =>
900 this.atomic_compare_exchange(args, dest, AtomicRwOp::Acquire, AtomicReadOp::Acquire)?,
902 "atomic_cxchg_release_relaxed" =>
903 this.atomic_compare_exchange(args, dest, AtomicRwOp::Release, AtomicReadOp::Relaxed)?,
905 "atomic_cxchg_acqrel_acquire" =>
906 this.atomic_compare_exchange(args, dest, AtomicRwOp::AcqRel, AtomicReadOp::Acquire)?,
908 "atomic_cxchg_relaxed_relaxed" =>
909 this.atomic_compare_exchange(args, dest, AtomicRwOp::Relaxed, AtomicReadOp::Relaxed)?,
911 "atomic_cxchg_acquire_relaxed" =>
912 this.atomic_compare_exchange(args, dest, AtomicRwOp::Acquire, AtomicReadOp::Relaxed)?,
914 "atomic_cxchg_acqrel_relaxed" =>
915 this.atomic_compare_exchange(args, dest, AtomicRwOp::AcqRel, AtomicReadOp::Relaxed)?,
917 "atomic_cxchg_seqcst_relaxed" =>
918 this.atomic_compare_exchange(args, dest, AtomicRwOp::SeqCst, AtomicReadOp::Relaxed)?,
920 "atomic_cxchg_seqcst_acquire" =>
921 this.atomic_compare_exchange(args, dest, AtomicRwOp::SeqCst, AtomicReadOp::Acquire)?,
924 "atomic_cxchgweak_seqcst_seqcst" =>
925 this.atomic_compare_exchange_weak(args, dest, AtomicRwOp::SeqCst, AtomicReadOp::SeqCst)?,
927 "atomic_cxchgweak_acquire_acquire" =>
928 this.atomic_compare_exchange_weak(args, dest, AtomicRwOp::Acquire, AtomicReadOp::Acquire)?,
930 "atomic_cxchgweak_release_relaxed" =>
931 this.atomic_compare_exchange_weak(args, dest, AtomicRwOp::Release, AtomicReadOp::Relaxed)?,
933 "atomic_cxchgweak_acqrel_acquire" =>
934 this.atomic_compare_exchange_weak(args, dest, AtomicRwOp::AcqRel, AtomicReadOp::Acquire)?,
936 "atomic_cxchgweak_relaxed_relaxed" =>
937 this.atomic_compare_exchange_weak(args, dest, AtomicRwOp::Relaxed, AtomicReadOp::Relaxed)?,
939 "atomic_cxchgweak_acquire_relaxed" =>
940 this.atomic_compare_exchange_weak(args, dest, AtomicRwOp::Acquire, AtomicReadOp::Relaxed)?,
942 "atomic_cxchgweak_acqrel_relaxed" =>
943 this.atomic_compare_exchange_weak(args, dest, AtomicRwOp::AcqRel, AtomicReadOp::Relaxed)?,
945 "atomic_cxchgweak_seqcst_relaxed" =>
946 this.atomic_compare_exchange_weak(args, dest, AtomicRwOp::SeqCst, AtomicReadOp::Relaxed)?,
948 "atomic_cxchgweak_seqcst_acquire" =>
949 this.atomic_compare_exchange_weak(args, dest, AtomicRwOp::SeqCst, AtomicReadOp::Acquire)?,
952 "atomic_or_seqcst" =>
953 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitOr, false), AtomicRwOp::SeqCst)?,
955 "atomic_or_acquire" =>
956 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitOr, false), AtomicRwOp::Acquire)?,
958 "atomic_or_release" =>
959 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitOr, false), AtomicRwOp::Release)?,
961 "atomic_or_acqrel" =>
962 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitOr, false), AtomicRwOp::AcqRel)?,
964 "atomic_or_relaxed" =>
965 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitOr, false), AtomicRwOp::Relaxed)?,
967 "atomic_xor_seqcst" =>
968 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitXor, false), AtomicRwOp::SeqCst)?,
970 "atomic_xor_acquire" =>
971 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitXor, false), AtomicRwOp::Acquire)?,
973 "atomic_xor_release" =>
974 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitXor, false), AtomicRwOp::Release)?,
976 "atomic_xor_acqrel" =>
977 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitXor, false), AtomicRwOp::AcqRel)?,
979 "atomic_xor_relaxed" =>
980 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitXor, false), AtomicRwOp::Relaxed)?,
982 "atomic_and_seqcst" =>
983 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitAnd, false), AtomicRwOp::SeqCst)?,
985 "atomic_and_acquire" =>
986 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitAnd, false), AtomicRwOp::Acquire)?,
988 "atomic_and_release" =>
989 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitAnd, false), AtomicRwOp::Release)?,
991 "atomic_and_acqrel" =>
992 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitAnd, false), AtomicRwOp::AcqRel)?,
994 "atomic_and_relaxed" =>
995 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitAnd, false), AtomicRwOp::Relaxed)?,
997 "atomic_nand_seqcst" =>
998 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitAnd, true), AtomicRwOp::SeqCst)?,
1000 "atomic_nand_acquire" =>
1001 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitAnd, true), AtomicRwOp::Acquire)?,
1003 "atomic_nand_release" =>
1004 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitAnd, true), AtomicRwOp::Release)?,
1006 "atomic_nand_acqrel" =>
1007 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitAnd, true), AtomicRwOp::AcqRel)?,
1009 "atomic_nand_relaxed" =>
1010 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitAnd, true), AtomicRwOp::Relaxed)?,
1012 "atomic_xadd_seqcst" =>
1013 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::Add, false), AtomicRwOp::SeqCst)?,
1015 "atomic_xadd_acquire" =>
1016 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::Add, false), AtomicRwOp::Acquire)?,
1018 "atomic_xadd_release" =>
1019 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::Add, false), AtomicRwOp::Release)?,
1021 "atomic_xadd_acqrel" =>
1022 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::Add, false), AtomicRwOp::AcqRel)?,
1024 "atomic_xadd_relaxed" =>
1025 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::Add, false), AtomicRwOp::Relaxed)?,
1027 "atomic_xsub_seqcst" =>
1028 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::Sub, false), AtomicRwOp::SeqCst)?,
1030 "atomic_xsub_acquire" =>
1031 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::Sub, false), AtomicRwOp::Acquire)?,
1033 "atomic_xsub_release" =>
1034 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::Sub, false), AtomicRwOp::Release)?,
1036 "atomic_xsub_acqrel" =>
1037 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::Sub, false), AtomicRwOp::AcqRel)?,
1039 "atomic_xsub_relaxed" =>
1040 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::Sub, false), AtomicRwOp::Relaxed)?,
1041 "atomic_min_seqcst" => this.atomic_op(args, dest, AtomicOp::Min, AtomicRwOp::SeqCst)?,
1042 "atomic_min_acquire" =>
// Atomic min/max read-modify-write intrinsics: each name encodes the memory
// ordering; all forward to the shared `atomic_op` helper.
// NOTE(review): the unsigned variants (`atomic_umin_*`/`atomic_umax_*`) reuse
// AtomicOp::Min/AtomicOp::Max — presumably signedness is derived from the
// operand's integer type inside the helper; confirm in `atomic_min_max_scalar`.
1043 this.atomic_op(args, dest, AtomicOp::Min, AtomicRwOp::Acquire)?,
1044 "atomic_min_release" =>
1045 this.atomic_op(args, dest, AtomicOp::Min, AtomicRwOp::Release)?,
1046 "atomic_min_acqrel" => this.atomic_op(args, dest, AtomicOp::Min, AtomicRwOp::AcqRel)?,
1047 "atomic_min_relaxed" =>
1048 this.atomic_op(args, dest, AtomicOp::Min, AtomicRwOp::Relaxed)?,
1049 "atomic_max_seqcst" => this.atomic_op(args, dest, AtomicOp::Max, AtomicRwOp::SeqCst)?,
1050 "atomic_max_acquire" =>
1051 this.atomic_op(args, dest, AtomicOp::Max, AtomicRwOp::Acquire)?,
1052 "atomic_max_release" =>
1053 this.atomic_op(args, dest, AtomicOp::Max, AtomicRwOp::Release)?,
1054 "atomic_max_acqrel" => this.atomic_op(args, dest, AtomicOp::Max, AtomicRwOp::AcqRel)?,
1055 "atomic_max_relaxed" =>
1056 this.atomic_op(args, dest, AtomicOp::Max, AtomicRwOp::Relaxed)?,
1057 "atomic_umin_seqcst" =>
1058 this.atomic_op(args, dest, AtomicOp::Min, AtomicRwOp::SeqCst)?,
1059 "atomic_umin_acquire" =>
1060 this.atomic_op(args, dest, AtomicOp::Min, AtomicRwOp::Acquire)?,
1061 "atomic_umin_release" =>
1062 this.atomic_op(args, dest, AtomicOp::Min, AtomicRwOp::Release)?,
1063 "atomic_umin_acqrel" =>
1064 this.atomic_op(args, dest, AtomicOp::Min, AtomicRwOp::AcqRel)?,
1065 "atomic_umin_relaxed" =>
1066 this.atomic_op(args, dest, AtomicOp::Min, AtomicRwOp::Relaxed)?,
1067 "atomic_umax_seqcst" =>
1068 this.atomic_op(args, dest, AtomicOp::Max, AtomicRwOp::SeqCst)?,
1069 "atomic_umax_acquire" =>
1070 this.atomic_op(args, dest, AtomicOp::Max, AtomicRwOp::Acquire)?,
1071 "atomic_umax_release" =>
1072 this.atomic_op(args, dest, AtomicOp::Max, AtomicRwOp::Release)?,
1073 "atomic_umax_acqrel" =>
1074 this.atomic_op(args, dest, AtomicOp::Max, AtomicRwOp::AcqRel)?,
1075 "atomic_umax_relaxed" =>
1076 this.atomic_op(args, dest, AtomicOp::Max, AtomicRwOp::Relaxed)?,
// `exact_div`: division that the program promises is exact; the helper is
// expected to raise UB on a nonzero remainder — TODO confirm in `exact_div`.
1080 let [num, denom] = check_arg_count(args)?;
1081 this.exact_div(&this.read_immediate(num)?, &this.read_immediate(denom)?, dest)?;
// `try` (catch_unwind) has custom control flow, so it performs the return
// transfer itself instead of falling through to `go_to_block` below.
1084 "try" => return this.handle_try(args, dest, ret),
// `breakpoint` takes no arguments and terminates the interpreted program.
1087 let [] = check_arg_count(args)?;
1088 // normally this would raise a SIGTRAP, which aborts if no debugger is connected
1089 throw_machine_stop!(TerminationInfo::Abort("Trace/breakpoint trap".to_string()))
// Any intrinsic not matched above is reported as unsupported.
1092 name => throw_unsup_format!("unimplemented intrinsic: {}", name),
// All non-diverging intrinsics fall through here: log the destination and
// continue execution at the return basic block.
1095 trace!("{:?}", this.dump_place(**dest));
1096 this.go_to_block(ret);
// Atomic-load helper: dereferences the single pointer argument, reads it
// atomically with ordering `atomic`, and stores the value into `dest`
// with a plain (non-atomic) write.
1102 args: &[OpTy<'tcx, Tag>],
1103 dest: &PlaceTy<'tcx, Tag>,
1104 atomic: AtomicReadOp,
1105 ) -> InterpResult<'tcx> {
1106 let this = self.eval_context_mut();
1108 let [place] = check_arg_count(args)?;
1109 let place = this.deref_operand(place)?;
1111 // make sure it fits into a scalar; otherwise it cannot be atomic
1112 let val = this.read_scalar_atomic(&place, atomic)?;
1114 // Check alignment requirements. Atomics must always be aligned to their size,
1115 // even if the type they wrap would be less aligned (e.g. AtomicU64 on 32bit must
// be 8-aligned). The required alignment is therefore derived from the size.
1117 let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
1118 this.check_ptr_access_align(
1122 CheckInAllocMsg::MemoryAccessTest,
1124 // Perform regular access.
1125 this.write_scalar(val, dest)?;
// Atomic-store helper: reads the value argument as a scalar, checks the
// size-derived alignment of the target pointer, and writes it atomically
// with ordering `atomic`. No destination place: stores return nothing.
1131 args: &[OpTy<'tcx, Tag>],
1132 atomic: AtomicWriteOp,
1133 ) -> InterpResult<'tcx> {
1134 let this = self.eval_context_mut();
1136 let [place, val] = check_arg_count(args)?;
1137 let place = this.deref_operand(place)?;
1138 let val = this.read_scalar(val)?; // make sure it fits into a scalar; otherwise it cannot be atomic
1140 // Check alignment requirements. Atomics must always be aligned to their size,
1141 // even if the type they wrap would be less aligned (e.g. AtomicU64 on 32bit must
// be 8-aligned). The required alignment is therefore derived from the size.
1143 let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
1144 this.check_ptr_access_align(
1148 CheckInAllocMsg::MemoryAccessTest,
1151 // Perform atomic store
1152 this.write_scalar_atomic(val, &place, atomic)?;
// Compiler-fence helper: validates that the intrinsic received no arguments.
// The `atomic` ordering is accepted but unused in the visible code — consistent
// with the FIXME below stating compiler fences are currently ignored.
1158 args: &[OpTy<'tcx, Tag>],
1159 atomic: AtomicFenceOp,
1160 ) -> InterpResult<'tcx> {
1161 let [] = check_arg_count(args)?;
1163 //FIXME: compiler fences are currently ignored
// Atomic-fence helper: takes no arguments and forwards the ordering to the
// machine's fence validation (`validate_atomic_fence`), which performs the
// actual synchronization bookkeeping.
1169 args: &[OpTy<'tcx, Tag>],
1170 atomic: AtomicFenceOp,
1171 ) -> InterpResult<'tcx> {
1172 let this = self.eval_context_mut();
1173 let [] = check_arg_count(args)?;
1174 this.validate_atomic_fence(atomic)?;
// Shared helper for atomic read-modify-write intrinsics (MIR binops such as
// add/sub/and/or/xor via `AtomicOp::MirOp`, plus min/max). In every case the
// OLD value is written to `dest`, matching the intrinsics' contract.
1180 args: &[OpTy<'tcx, Tag>],
1181 dest: &PlaceTy<'tcx, Tag>,
1182 atomic_op: AtomicOp,
1184 ) -> InterpResult<'tcx> {
1185 let this = self.eval_context_mut();
1187 let [place, rhs] = check_arg_count(args)?;
1188 let place = this.deref_operand(place)?;
// These intrinsics are only ever instantiated at integer types; a non-integral
// type here is a bug in the compiler/interpreter, not UB in the program.
1190 if !place.layout.ty.is_integral() {
1191 bug!("Atomic arithmetic operations only work on integer types");
1193 let rhs = this.read_immediate(rhs)?;
1195 // Check alignment requirements. Atomics must always be aligned to their size,
1196 // even if the type they wrap would be less aligned (e.g. AtomicU64 on 32bit must
// be 8-aligned). The required alignment is therefore derived from the size.
1198 let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
1199 this.check_ptr_access_align(
1203 CheckInAllocMsg::MemoryAccessTest,
// Min: third argument `true` selects the "min" direction in the helper.
// NOTE(review): signedness is presumably taken from `place`'s layout inside
// `atomic_min_max_scalar` — confirm there.
1208 let old = this.atomic_min_max_scalar(&place, rhs, true, atomic)?;
1209 this.write_immediate(*old, dest)?; // old value is returned
// Max: `false` selects the "max" direction.
1213 let old = this.atomic_min_max_scalar(&place, rhs, false, atomic)?;
1214 this.write_immediate(*old, dest)?; // old value is returned
// Generic MIR binop; `neg` requests bitwise negation of the op's result
// (see the `AtomicOp::MirOp(mir::BinOp, bool)` variant) — TODO confirm
// against `atomic_op_immediate`.
1217 AtomicOp::MirOp(op, neg) => {
1218 let old = this.atomic_op_immediate(&place, &rhs, op, neg, atomic)?;
1219 this.write_immediate(*old, dest)?; // old value is returned
// Atomic-exchange helper: atomically replaces the pointee with `new` and
// writes the OLD value to `dest`.
1227 args: &[OpTy<'tcx, Tag>],
1228 dest: &PlaceTy<'tcx, Tag>,
1230 ) -> InterpResult<'tcx> {
1231 let this = self.eval_context_mut();
1233 let [place, new] = check_arg_count(args)?;
1234 let place = this.deref_operand(place)?;
1235 let new = this.read_scalar(new)?;
1237 // Check alignment requirements. Atomics must always be aligned to their size,
1238 // even if the type they wrap would be less aligned (e.g. AtomicU64 on 32bit must
// be 8-aligned). The required alignment is therefore derived from the size.
1240 let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
1241 this.check_ptr_access_align(
1245 CheckInAllocMsg::MemoryAccessTest,
1248 let old = this.atomic_exchange_scalar(&place, new, atomic)?;
1249 this.write_scalar(old, dest)?; // old value is returned
/// Shared implementation behind both the strong and the weak compare-exchange
/// intrinsics. Reads the expected value as an immediate (so it can be compared
/// with `binary_op`), performs the CAS with the given `success` ordering, and
/// writes the returned old value (presumably a value/flag pair, per the
/// intrinsic's `(T, bool)` return — TODO confirm) into `dest`.
1253 fn atomic_compare_exchange_impl(
1255 args: &[OpTy<'tcx, Tag>],
1256 dest: &PlaceTy<'tcx, Tag>,
1257 success: AtomicRwOp,
// `can_fail_spuriously` distinguishes `compare_exchange_weak` (true) from the
// strong variant (false); see the two thin wrappers below.
1259 can_fail_spuriously: bool,
1260 ) -> InterpResult<'tcx> {
1261 let this = self.eval_context_mut();
1263 let [place, expect_old, new] = check_arg_count(args)?;
1264 let place = this.deref_operand(place)?;
1265 let expect_old = this.read_immediate(expect_old)?; // read as immediate for the sake of `binary_op()`
1266 let new = this.read_scalar(new)?;
1268 // Check alignment requirements. Atomics must always be aligned to their size,
1269 // even if the type they wrap would be less aligned (e.g. AtomicU64 on 32bit must
// be 8-aligned). The required alignment is therefore derived from the size.
1271 let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
1272 this.check_ptr_access_align(
1276 CheckInAllocMsg::MemoryAccessTest,
1279 let old = this.atomic_compare_exchange_scalar(
1285 can_fail_spuriously,
1288 // Return old value.
1289 this.write_immediate(old, dest)?;
/// Strong compare-exchange: delegates to the shared implementation with
/// `can_fail_spuriously = false`, so the operation only fails when the
/// current value differs from the expected one.
1293 fn atomic_compare_exchange(
1295 args: &[OpTy<'tcx, Tag>],
1296 dest: &PlaceTy<'tcx, Tag>,
1297 success: AtomicRwOp,
1299 ) -> InterpResult<'tcx> {
1300 self.atomic_compare_exchange_impl(args, dest, success, fail, false)
/// Weak compare-exchange: delegates to the shared implementation with
/// `can_fail_spuriously = true`, permitting spurious failures as the
/// `compare_exchange_weak` contract allows.
1303 fn atomic_compare_exchange_weak(
1305 args: &[OpTy<'tcx, Tag>],
1306 dest: &PlaceTy<'tcx, Tag>,
1307 success: AtomicRwOp,
1309 ) -> InterpResult<'tcx> {
1310 self.atomic_compare_exchange_impl(args, dest, success, fail, true)
/// Implementation of the `float_to_int_unchecked` intrinsic: converts a float
/// of apfloat type `F` to integer type `dest_ty`, raising UB (via the
/// throw below) when the truncated value is not exactly representable.
1313 fn float_to_int_unchecked<F>(
1316 dest_ty: ty::Ty<'tcx>,
1317 ) -> InterpResult<'tcx, Scalar<Tag>>
1319 F: Float + Into<Scalar<Tag>>,
1321 let this = self.eval_context_ref();
1323 // Step 1: cut off the fractional part of `f`. The result of this is
1324 // guaranteed to be precisely representable in IEEE floats.
1325 let f = f.round_to_integral(Round::TowardZero).value;
1327 // Step 2: Cast the truncated float to the target integer type and see if we lose any information in this step.
1328 Ok(match dest_ty.kind() {
// Unsigned targets: checked conversion at the target's exact bit width.
1331 let size = Integer::from_uint_ty(this, *t).size();
1332 let res = f.to_u128(size.bits_usize());
1333 if res.status.is_empty() {
1334 // No status flags means there was no further rounding or other loss of precision.
1335 Scalar::from_uint(res.value, size)
1337 // `f` was not representable in this integer type.
1339 "`float_to_int_unchecked` intrinsic called on {} which cannot be represented in target type `{:?}`",
// Signed targets: same scheme with the signed i128 conversion.
1347 let size = Integer::from_int_ty(this, *t).size();
1348 let res = f.to_i128(size.bits_usize());
1349 if res.status.is_empty() {
1350 // No status flags means there was no further rounding or other loss of precision.
1351 Scalar::from_int(res.value, size)
1353 // `f` was not representable in this integer type.
1355 "`float_to_int_unchecked` intrinsic called on {} which cannot be represented in target type `{:?}`",
// Any non-integer target type is a compiler bug, not program UB.
1362 _ => bug!("`float_to_int_unchecked` called with non-int output type {:?}", dest_ty),
// Float maximum helper: both operands must have the same float type (asserted
// below); dispatches on f32/f64 and uses apfloat's `max`.
1368 left: &ImmTy<'tcx, Tag>,
1369 right: &ImmTy<'tcx, Tag>,
1370 ) -> InterpResult<'tcx, Scalar<Tag>> {
1371 assert_eq!(left.layout.ty, right.layout.ty);
// Non-float operands are a bug in the caller, not program UB.
1372 let ty::Float(float_ty) = left.layout.ty.kind() else {
1373 bug!("fmax operand is not a float")
1375 let left = left.to_scalar()?;
1376 let right = right.to_scalar()?;
1378 FloatTy::F32 => Scalar::from_f32(left.to_f32()?.max(right.to_f32()?)),
1379 FloatTy::F64 => Scalar::from_f64(left.to_f64()?.max(right.to_f64()?)),
// Float minimum helper: mirrors the maximum helper above, but uses apfloat's
// `min`. Operand types must match (asserted below).
1384 left: &ImmTy<'tcx, Tag>,
1385 right: &ImmTy<'tcx, Tag>,
1386 ) -> InterpResult<'tcx, Scalar<Tag>> {
1387 assert_eq!(left.layout.ty, right.layout.ty);
// Non-float operands are a bug in the caller, not program UB.
1388 let ty::Float(float_ty) = left.layout.ty.kind() else {
1389 bug!("fmin operand is not a float")
1391 let left = left.to_scalar()?;
1392 let right = right.to_scalar()?;
1394 FloatTy::F32 => Scalar::from_f32(left.to_f32()?.min(right.to_f32()?)),
1395 FloatTy::F64 => Scalar::from_f64(left.to_f64()?.min(right.to_f64()?)),
1399 fn bool_to_simd_element(b: bool, size: Size) -> Scalar<Tag> {
1400 // SIMD uses all-1 as pattern for "true"
1401 let val = if b { -1 } else { 0 };
1402 Scalar::from_int(val, size)
// Decodes a SIMD mask element back into a bool: it is read as a signed integer
// of the element's size, and anything other than a valid mask pattern is UB
// per the error raised below (all-0-bits = false, all-1-bits = true,
// mirroring `bool_to_simd_element`).
1405 fn simd_element_to_bool(elem: ImmTy<'_, Tag>) -> InterpResult<'_, bool> {
1406 let val = elem.to_scalar()?.to_int(elem.layout.size)?;
1410 _ => throw_ub_format!("each element of a SIMD mask must be all-0-bits or all-1-bits"),
// Maps a vector lane index (`idx < vec_len`, asserted) to its bit position in
// a bitmask for the given target byte order: identity on little-endian,
// mirrored on big-endian.
// NOTE(review): "endianess" is a misspelling of "endianness"; renaming would
// change the signature visible to callers, so it is left as-is here.
1414 fn simd_bitmask_index(idx: u64, vec_len: u64, endianess: Endian) -> u64 {
1415 assert!(idx < vec_len);
1417 Endian::Little => idx,
1418 Endian::Big => vec_len - 1 - idx, // reverse order of bits