5 use rustc_apfloat::{Float, Round};
6 use rustc_middle::ty::layout::{HasParamEnv, IntegerExt, LayoutOf};
7 use rustc_middle::{mir, mir::BinOp, ty, ty::FloatTy};
8 use rustc_target::abi::{Align, Integer};
11 use helpers::{bool_to_simd_element, check_arg_count, simd_element_to_bool};
14 MirOp(mir::BinOp, bool),
19 impl<'mir, 'tcx: 'mir> EvalContextExt<'mir, 'tcx> for crate::MiriEvalContext<'mir, 'tcx> {}
20 pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx> {
23 instance: ty::Instance<'tcx>,
24 args: &[OpTy<'tcx, Tag>],
25 ret: Option<(&PlaceTy<'tcx, Tag>, mir::BasicBlock)>,
26 _unwind: StackPopUnwind,
27 ) -> InterpResult<'tcx> {
28 let this = self.eval_context_mut();
30 if this.emulate_intrinsic(instance, args, ret)? {
34 // All supported intrinsics have a return place.
35 let intrinsic_name = this.tcx.item_name(instance.def_id());
36 let intrinsic_name = intrinsic_name.as_str();
37 let (dest, ret) = match ret {
38 None => throw_unsup_format!("unimplemented (diverging) intrinsic: {}", intrinsic_name),
42 // Then handle terminating intrinsics.
43 match intrinsic_name {
44 // Miri overwriting CTFE intrinsics.
45 "ptr_guaranteed_eq" => {
46 let &[ref left, ref right] = check_arg_count(args)?;
47 let left = this.read_immediate(left)?;
48 let right = this.read_immediate(right)?;
49 this.binop_ignore_overflow(mir::BinOp::Eq, &left, &right, dest)?;
51 "ptr_guaranteed_ne" => {
52 let &[ref left, ref right] = check_arg_count(args)?;
53 let left = this.read_immediate(left)?;
54 let right = this.read_immediate(right)?;
55 this.binop_ignore_overflow(mir::BinOp::Ne, &left, &right, dest)?;
58 // For now, for compatibility with the run-time implementation of this, we just return null.
59 // See <https://github.com/rust-lang/rust/issues/93935>.
60 this.write_null(dest)?;
62 "const_deallocate" => {
66 // Raw memory accesses
68 let &[ref place] = check_arg_count(args)?;
69 let place = this.deref_operand(place)?;
70 this.copy_op(&place.into(), dest)?;
73 let &[ref place, ref dest] = check_arg_count(args)?;
74 let place = this.deref_operand(place)?;
75 this.copy_op(dest, &place.into())?;
78 "write_bytes" | "volatile_set_memory" => {
79 let &[ref ptr, ref val_byte, ref count] = check_arg_count(args)?;
80 let ty = instance.substs.type_at(0);
81 let ty_layout = this.layout_of(ty)?;
82 let val_byte = this.read_scalar(val_byte)?.to_u8()?;
83 let ptr = this.read_pointer(ptr)?;
84 let count = this.read_scalar(count)?.to_machine_usize(this)?;
85 let byte_count = ty_layout.size.checked_mul(count, this).ok_or_else(|| {
86 err_ub_format!("overflow computing total size of `{}`", intrinsic_name)
89 .write_bytes(ptr, iter::repeat(val_byte).take(byte_count.bytes() as usize))?;
92 // Floating-point operations
94 let &[ref f] = check_arg_count(args)?;
95 let f = this.read_scalar(f)?.to_f32()?;
96 // Can be implemented in soft-floats.
97 this.write_scalar(Scalar::from_f32(f.abs()), dest)?;
100 let &[ref f] = check_arg_count(args)?;
101 let f = this.read_scalar(f)?.to_f64()?;
102 // Can be implemented in soft-floats.
103 this.write_scalar(Scalar::from_f64(f.abs()), dest)?;
119 let &[ref f] = check_arg_count(args)?;
120 // FIXME: Using host floats.
121 let f = f32::from_bits(this.read_scalar(f)?.to_u32()?);
122 let f = match intrinsic_name {
125 "sqrtf32" => f.sqrt(),
127 "exp2f32" => f.exp2(),
129 "log10f32" => f.log10(),
130 "log2f32" => f.log2(),
131 "floorf32" => f.floor(),
132 "ceilf32" => f.ceil(),
133 "truncf32" => f.trunc(),
134 "roundf32" => f.round(),
137 this.write_scalar(Scalar::from_u32(f.to_bits()), dest)?;
154 let &[ref f] = check_arg_count(args)?;
155 // FIXME: Using host floats.
156 let f = f64::from_bits(this.read_scalar(f)?.to_u64()?);
157 let f = match intrinsic_name {
160 "sqrtf64" => f.sqrt(),
162 "exp2f64" => f.exp2(),
164 "log10f64" => f.log10(),
165 "log2f64" => f.log2(),
166 "floorf64" => f.floor(),
167 "ceilf64" => f.ceil(),
168 "truncf64" => f.trunc(),
169 "roundf64" => f.round(),
172 this.write_scalar(Scalar::from_u64(f.to_bits()), dest)?;
182 let &[ref a, ref b] = check_arg_count(args)?;
183 let a = this.read_immediate(a)?;
184 let b = this.read_immediate(b)?;
185 let op = match intrinsic_name {
186 "fadd_fast" => mir::BinOp::Add,
187 "fsub_fast" => mir::BinOp::Sub,
188 "fmul_fast" => mir::BinOp::Mul,
189 "fdiv_fast" => mir::BinOp::Div,
190 "frem_fast" => mir::BinOp::Rem,
193 let float_finite = |x: ImmTy<'tcx, _>| -> InterpResult<'tcx, bool> {
194 Ok(match x.layout.ty.kind() {
195 ty::Float(FloatTy::F32) => x.to_scalar()?.to_f32()?.is_finite(),
196 ty::Float(FloatTy::F64) => x.to_scalar()?.to_f64()?.is_finite(),
198 "`{}` called with non-float input type {:?}",
204 match (float_finite(a)?, float_finite(b)?) {
205 (false, false) => throw_ub_format!(
206 "`{}` intrinsic called with non-finite value as both parameters",
209 (false, _) => throw_ub_format!(
210 "`{}` intrinsic called with non-finite value as first parameter",
213 (_, false) => throw_ub_format!(
214 "`{}` intrinsic called with non-finite value as second parameter",
219 this.binop_ignore_overflow(op, &a, &b, dest)?;
227 let &[ref a, ref b] = check_arg_count(args)?;
228 let a = this.read_scalar(a)?.to_f32()?;
229 let b = this.read_scalar(b)?.to_f32()?;
230 let res = match intrinsic_name {
231 "minnumf32" => a.min(b),
232 "maxnumf32" => a.max(b),
233 "copysignf32" => a.copy_sign(b),
236 this.write_scalar(Scalar::from_f32(res), dest)?;
244 let &[ref a, ref b] = check_arg_count(args)?;
245 let a = this.read_scalar(a)?.to_f64()?;
246 let b = this.read_scalar(b)?.to_f64()?;
247 let res = match intrinsic_name {
248 "minnumf64" => a.min(b),
249 "maxnumf64" => a.max(b),
250 "copysignf64" => a.copy_sign(b),
253 this.write_scalar(Scalar::from_f64(res), dest)?;
257 let &[ref f, ref f2] = check_arg_count(args)?;
258 // FIXME: Using host floats.
259 let f = f32::from_bits(this.read_scalar(f)?.to_u32()?);
260 let f2 = f32::from_bits(this.read_scalar(f2)?.to_u32()?);
261 this.write_scalar(Scalar::from_u32(f.powf(f2).to_bits()), dest)?;
265 let &[ref f, ref f2] = check_arg_count(args)?;
266 // FIXME: Using host floats.
267 let f = f64::from_bits(this.read_scalar(f)?.to_u64()?);
268 let f2 = f64::from_bits(this.read_scalar(f2)?.to_u64()?);
269 this.write_scalar(Scalar::from_u64(f.powf(f2).to_bits()), dest)?;
273 let &[ref a, ref b, ref c] = check_arg_count(args)?;
274 let a = this.read_scalar(a)?.to_f32()?;
275 let b = this.read_scalar(b)?.to_f32()?;
276 let c = this.read_scalar(c)?.to_f32()?;
277 let res = a.mul_add(b, c).value;
278 this.write_scalar(Scalar::from_f32(res), dest)?;
282 let &[ref a, ref b, ref c] = check_arg_count(args)?;
283 let a = this.read_scalar(a)?.to_f64()?;
284 let b = this.read_scalar(b)?.to_f64()?;
285 let c = this.read_scalar(c)?.to_f64()?;
286 let res = a.mul_add(b, c).value;
287 this.write_scalar(Scalar::from_f64(res), dest)?;
291 let &[ref f, ref i] = check_arg_count(args)?;
292 // FIXME: Using host floats.
293 let f = f32::from_bits(this.read_scalar(f)?.to_u32()?);
294 let i = this.read_scalar(i)?.to_i32()?;
295 this.write_scalar(Scalar::from_u32(f.powi(i).to_bits()), dest)?;
299 let &[ref f, ref i] = check_arg_count(args)?;
300 // FIXME: Using host floats.
301 let f = f64::from_bits(this.read_scalar(f)?.to_u64()?);
302 let i = this.read_scalar(i)?.to_i32()?;
303 this.write_scalar(Scalar::from_u64(f.powi(i).to_bits()), dest)?;
306 "float_to_int_unchecked" => {
307 let &[ref val] = check_arg_count(args)?;
308 let val = this.read_immediate(val)?;
310 let res = match val.layout.ty.kind() {
311 ty::Float(FloatTy::F32) =>
312 this.float_to_int_unchecked(val.to_scalar()?.to_f32()?, dest.layout.ty)?,
313 ty::Float(FloatTy::F64) =>
314 this.float_to_int_unchecked(val.to_scalar()?.to_f64()?, dest.layout.ty)?,
317 "`float_to_int_unchecked` called with non-float input type {:?}",
322 this.write_scalar(res, dest)?;
334 let &[ref op] = check_arg_count(args)?;
335 let (op, op_len) = this.operand_to_simd(op)?;
336 let (dest, dest_len) = this.place_to_simd(dest)?;
338 assert_eq!(dest_len, op_len);
340 #[derive(Copy, Clone)]
348 #[derive(Copy, Clone)]
354 let which = match intrinsic_name {
355 "simd_neg" => Op::MirOp(mir::UnOp::Neg),
356 "simd_fabs" => Op::Abs,
357 "simd_ceil" => Op::HostOp(HostFloatOp::Ceil),
358 "simd_floor" => Op::HostOp(HostFloatOp::Floor),
359 "simd_round" => Op::HostOp(HostFloatOp::Round),
360 "simd_trunc" => Op::HostOp(HostFloatOp::Trunc),
361 "simd_fsqrt" => Op::HostOp(HostFloatOp::Sqrt),
365 for i in 0..dest_len {
366 let op = this.read_immediate(&this.mplace_index(&op, i)?.into())?;
367 let dest = this.mplace_index(&dest, i)?;
368 let val = match which {
369 Op::MirOp(mir_op) => this.unary_op(mir_op, &op)?.to_scalar()?,
371 // Works for f32 and f64.
372 let ty::Float(float_ty) = op.layout.ty.kind() else {
373 bug!("{} operand is not a float", intrinsic_name)
375 let op = op.to_scalar()?;
377 FloatTy::F32 => Scalar::from_f32(op.to_f32()?.abs()),
378 FloatTy::F64 => Scalar::from_f64(op.to_f64()?.abs()),
381 Op::HostOp(host_op) => {
382 let ty::Float(float_ty) = op.layout.ty.kind() else {
383 bug!("{} operand is not a float", intrinsic_name)
385 // FIXME using host floats
388 let f = f32::from_bits(op.to_scalar()?.to_u32()?);
389 let res = match host_op {
390 HostFloatOp::Ceil => f.ceil(),
391 HostFloatOp::Floor => f.floor(),
392 HostFloatOp::Round => f.round(),
393 HostFloatOp::Trunc => f.trunc(),
394 HostFloatOp::Sqrt => f.sqrt(),
396 Scalar::from_u32(res.to_bits())
399 let f = f64::from_bits(op.to_scalar()?.to_u64()?);
400 let res = match host_op {
401 HostFloatOp::Ceil => f.ceil(),
402 HostFloatOp::Floor => f.floor(),
403 HostFloatOp::Round => f.round(),
404 HostFloatOp::Trunc => f.trunc(),
405 HostFloatOp::Sqrt => f.sqrt(),
407 Scalar::from_u64(res.to_bits())
413 this.write_scalar(val, &dest.into())?;
435 | "simd_saturating_add"
436 | "simd_saturating_sub" => {
439 let &[ref left, ref right] = check_arg_count(args)?;
440 let (left, left_len) = this.operand_to_simd(left)?;
441 let (right, right_len) = this.operand_to_simd(right)?;
442 let (dest, dest_len) = this.place_to_simd(dest)?;
444 assert_eq!(dest_len, left_len);
445 assert_eq!(dest_len, right_len);
453 let which = match intrinsic_name {
454 "simd_add" => Op::MirOp(BinOp::Add),
455 "simd_sub" => Op::MirOp(BinOp::Sub),
456 "simd_mul" => Op::MirOp(BinOp::Mul),
457 "simd_div" => Op::MirOp(BinOp::Div),
458 "simd_rem" => Op::MirOp(BinOp::Rem),
459 "simd_shl" => Op::MirOp(BinOp::Shl),
460 "simd_shr" => Op::MirOp(BinOp::Shr),
461 "simd_and" => Op::MirOp(BinOp::BitAnd),
462 "simd_or" => Op::MirOp(BinOp::BitOr),
463 "simd_xor" => Op::MirOp(BinOp::BitXor),
464 "simd_eq" => Op::MirOp(BinOp::Eq),
465 "simd_ne" => Op::MirOp(BinOp::Ne),
466 "simd_lt" => Op::MirOp(BinOp::Lt),
467 "simd_le" => Op::MirOp(BinOp::Le),
468 "simd_gt" => Op::MirOp(BinOp::Gt),
469 "simd_ge" => Op::MirOp(BinOp::Ge),
470 "simd_fmax" => Op::FMax,
471 "simd_fmin" => Op::FMin,
472 "simd_saturating_add" => Op::SaturatingOp(BinOp::Add),
473 "simd_saturating_sub" => Op::SaturatingOp(BinOp::Sub),
477 for i in 0..dest_len {
478 let left = this.read_immediate(&this.mplace_index(&left, i)?.into())?;
479 let right = this.read_immediate(&this.mplace_index(&right, i)?.into())?;
480 let dest = this.mplace_index(&dest, i)?;
481 let val = match which {
482 Op::MirOp(mir_op) => {
483 let (val, overflowed, ty) = this.overflowing_binary_op(mir_op, &left, &right)?;
484 if matches!(mir_op, BinOp::Shl | BinOp::Shr) {
485 // Shifts have extra UB as SIMD operations that the MIR binop does not have.
486 // See <https://github.com/rust-lang/rust/issues/91237>.
488 let r_val = right.to_scalar()?.to_bits(right.layout.size)?;
489 throw_ub_format!("overflowing shift by {} in `{}` in SIMD lane {}", r_val, intrinsic_name, i);
492 if matches!(mir_op, BinOp::Eq | BinOp::Ne | BinOp::Lt | BinOp::Le | BinOp::Gt | BinOp::Ge) {
493 // Special handling for boolean-returning operations
494 assert_eq!(ty, this.tcx.types.bool);
495 let val = val.to_bool().unwrap();
496 bool_to_simd_element(val, dest.layout.size)
498 assert_ne!(ty, this.tcx.types.bool);
499 assert_eq!(ty, dest.layout.ty);
504 fmax_op(&left, &right)?
507 fmin_op(&left, &right)?
509 Op::SaturatingOp(mir_op) => {
510 this.saturating_arith(mir_op, &left, &right)?
513 this.write_scalar(val, &dest.into())?;
517 let &[ref a, ref b, ref c] = check_arg_count(args)?;
518 let (a, a_len) = this.operand_to_simd(a)?;
519 let (b, b_len) = this.operand_to_simd(b)?;
520 let (c, c_len) = this.operand_to_simd(c)?;
521 let (dest, dest_len) = this.place_to_simd(dest)?;
523 assert_eq!(dest_len, a_len);
524 assert_eq!(dest_len, b_len);
525 assert_eq!(dest_len, c_len);
527 for i in 0..dest_len {
528 let a = this.read_immediate(&this.mplace_index(&a, i)?.into())?.to_scalar()?;
529 let b = this.read_immediate(&this.mplace_index(&b, i)?.into())?.to_scalar()?;
530 let c = this.read_immediate(&this.mplace_index(&c, i)?.into())?.to_scalar()?;
531 let dest = this.mplace_index(&dest, i)?;
533 // Works for f32 and f64.
534 let ty::Float(float_ty) = dest.layout.ty.kind() else {
535 bug!("{} operand is not a float", intrinsic_name)
537 let val = match float_ty {
539 Scalar::from_f32(a.to_f32()?.mul_add(b.to_f32()?, c.to_f32()?).value),
541 Scalar::from_f64(a.to_f64()?.mul_add(b.to_f64()?, c.to_f64()?).value),
543 this.write_scalar(val, &dest.into())?;
553 | "simd_reduce_min" => {
556 let &[ref op] = check_arg_count(args)?;
557 let (op, op_len) = this.operand_to_simd(op)?;
560 |b| ImmTy::from_scalar(Scalar::from_bool(b), this.machine.layouts.bool);
568 let which = match intrinsic_name {
569 "simd_reduce_and" => Op::MirOp(BinOp::BitAnd),
570 "simd_reduce_or" => Op::MirOp(BinOp::BitOr),
571 "simd_reduce_xor" => Op::MirOp(BinOp::BitXor),
572 "simd_reduce_any" => Op::MirOpBool(BinOp::BitOr),
573 "simd_reduce_all" => Op::MirOpBool(BinOp::BitAnd),
574 "simd_reduce_max" => Op::Max,
575 "simd_reduce_min" => Op::Min,
579 // Initialize with first lane, then proceed with the rest.
580 let mut res = this.read_immediate(&this.mplace_index(&op, 0)?.into())?;
581 if matches!(which, Op::MirOpBool(_)) {
582 // Convert to `bool` scalar.
583 res = imm_from_bool(simd_element_to_bool(res)?);
586 let op = this.read_immediate(&this.mplace_index(&op, i)?.into())?;
588 Op::MirOp(mir_op) => {
589 this.binary_op(mir_op, &res, &op)?
591 Op::MirOpBool(mir_op) => {
592 let op = imm_from_bool(simd_element_to_bool(op)?);
593 this.binary_op(mir_op, &res, &op)?
596 if matches!(res.layout.ty.kind(), ty::Float(_)) {
597 ImmTy::from_scalar(fmax_op(&res, &op)?, res.layout)
599 // Just boring integers, so no NaNs to worry about
600 if this.binary_op(BinOp::Ge, &res, &op)?.to_scalar()?.to_bool()? {
608 if matches!(res.layout.ty.kind(), ty::Float(_)) {
609 ImmTy::from_scalar(fmin_op(&res, &op)?, res.layout)
611 // Just boring integers, so no NaNs to worry about
612 if this.binary_op(BinOp::Le, &res, &op)?.to_scalar()?.to_bool()? {
621 this.write_immediate(*res, dest)?;
624 | "simd_reduce_add_ordered"
625 | "simd_reduce_mul_ordered" => {
628 let &[ref op, ref init] = check_arg_count(args)?;
629 let (op, op_len) = this.operand_to_simd(op)?;
630 let init = this.read_immediate(init)?;
632 let mir_op = match intrinsic_name {
633 "simd_reduce_add_ordered" => BinOp::Add,
634 "simd_reduce_mul_ordered" => BinOp::Mul,
640 let op = this.read_immediate(&this.mplace_index(&op, i)?.into())?;
641 res = this.binary_op(mir_op, &res, &op)?;
643 this.write_immediate(*res, dest)?;
646 let &[ref mask, ref yes, ref no] = check_arg_count(args)?;
647 let (mask, mask_len) = this.operand_to_simd(mask)?;
648 let (yes, yes_len) = this.operand_to_simd(yes)?;
649 let (no, no_len) = this.operand_to_simd(no)?;
650 let (dest, dest_len) = this.place_to_simd(dest)?;
652 assert_eq!(dest_len, mask_len);
653 assert_eq!(dest_len, yes_len);
654 assert_eq!(dest_len, no_len);
656 for i in 0..dest_len {
657 let mask = this.read_immediate(&this.mplace_index(&mask, i)?.into())?;
658 let yes = this.read_immediate(&this.mplace_index(&yes, i)?.into())?;
659 let no = this.read_immediate(&this.mplace_index(&no, i)?.into())?;
660 let dest = this.mplace_index(&dest, i)?;
662 let val = if simd_element_to_bool(mask)? { yes } else { no };
663 this.write_immediate(*val, &dest.into())?;
667 "simd_cast" | "simd_as" => {
668 let &[ref op] = check_arg_count(args)?;
669 let (op, op_len) = this.operand_to_simd(op)?;
670 let (dest, dest_len) = this.place_to_simd(dest)?;
672 assert_eq!(dest_len, op_len);
674 let safe_cast = intrinsic_name == "simd_as";
676 for i in 0..dest_len {
677 let op = this.read_immediate(&this.mplace_index(&op, i)?.into())?;
678 let dest = this.mplace_index(&dest, i)?;
680 let val = match (op.layout.ty.kind(), dest.layout.ty.kind()) {
681 // Int-to-(int|float): always safe
682 (ty::Int(_) | ty::Uint(_), ty::Int(_) | ty::Uint(_) | ty::Float(_)) =>
683 this.misc_cast(&op, dest.layout.ty)?,
684 // Float-to-float: always safe
685 (ty::Float(_), ty::Float(_)) =>
686 this.misc_cast(&op, dest.layout.ty)?,
687 // Float-to-int in safe mode
688 (ty::Float(_), ty::Int(_) | ty::Uint(_)) if safe_cast =>
689 this.misc_cast(&op, dest.layout.ty)?,
690 // Float-to-int in unchecked mode
691 (ty::Float(FloatTy::F32), ty::Int(_) | ty::Uint(_)) if !safe_cast =>
692 this.float_to_int_unchecked(op.to_scalar()?.to_f32()?, dest.layout.ty)?.into(),
693 (ty::Float(FloatTy::F64), ty::Int(_) | ty::Uint(_)) if !safe_cast =>
694 this.float_to_int_unchecked(op.to_scalar()?.to_f64()?, dest.layout.ty)?.into(),
697 "Unsupported SIMD cast from element type {} to {}",
702 this.write_immediate(val, &dest.into())?;
706 let &[ref left, ref right, ref index] = check_arg_count(args)?;
707 let (left, left_len) = this.operand_to_simd(left)?;
708 let (right, right_len) = this.operand_to_simd(right)?;
709 let (dest, dest_len) = this.place_to_simd(dest)?;
711 // `index` is an array, not a SIMD type
712 let ty::Array(_, index_len) = index.layout.ty.kind() else {
713 bug!("simd_shuffle index argument has non-array type {}", index.layout.ty)
715 let index_len = index_len.eval_usize(*this.tcx, this.param_env());
717 assert_eq!(left_len, right_len);
718 assert_eq!(index_len, dest_len);
720 for i in 0..dest_len {
721 let src_index: u64 = this
722 .read_immediate(&this.operand_index(&index, i)?.into())?
726 let dest = this.mplace_index(&dest, i)?;
728 let val = if src_index < left_len {
729 this.read_immediate(&this.mplace_index(&left, src_index)?.into())?
730 } else if src_index < left_len.checked_add(right_len).unwrap() {
732 &this.mplace_index(&right, src_index - left_len)?.into(),
736 "simd_shuffle index {} is out of bounds for 2 vectors of size {}",
741 this.write_immediate(*val, &dest.into())?;
745 let &[ref passthru, ref ptrs, ref mask] = check_arg_count(args)?;
746 let (passthru, passthru_len) = this.operand_to_simd(passthru)?;
747 let (ptrs, ptrs_len) = this.operand_to_simd(ptrs)?;
748 let (mask, mask_len) = this.operand_to_simd(mask)?;
749 let (dest, dest_len) = this.place_to_simd(dest)?;
751 assert_eq!(dest_len, passthru_len);
752 assert_eq!(dest_len, ptrs_len);
753 assert_eq!(dest_len, mask_len);
755 for i in 0..dest_len {
756 let passthru = this.read_immediate(&this.mplace_index(&passthru, i)?.into())?;
757 let ptr = this.read_immediate(&this.mplace_index(&ptrs, i)?.into())?;
758 let mask = this.read_immediate(&this.mplace_index(&mask, i)?.into())?;
759 let dest = this.mplace_index(&dest, i)?;
761 let val = if simd_element_to_bool(mask)? {
762 let place = this.deref_operand(&ptr.into())?;
763 this.read_immediate(&place.into())?
767 this.write_immediate(*val, &dest.into())?;
771 let &[ref value, ref ptrs, ref mask] = check_arg_count(args)?;
772 let (value, value_len) = this.operand_to_simd(value)?;
773 let (ptrs, ptrs_len) = this.operand_to_simd(ptrs)?;
774 let (mask, mask_len) = this.operand_to_simd(mask)?;
776 assert_eq!(ptrs_len, value_len);
777 assert_eq!(ptrs_len, mask_len);
779 for i in 0..ptrs_len {
780 let value = this.read_immediate(&this.mplace_index(&value, i)?.into())?;
781 let ptr = this.read_immediate(&this.mplace_index(&ptrs, i)?.into())?;
782 let mask = this.read_immediate(&this.mplace_index(&mask, i)?.into())?;
784 if simd_element_to_bool(mask)? {
785 let place = this.deref_operand(&ptr.into())?;
786 this.write_immediate(*value, &place.into())?;
792 "atomic_load" => this.atomic_load(args, dest, AtomicReadOp::SeqCst)?,
793 "atomic_load_relaxed" => this.atomic_load(args, dest, AtomicReadOp::Relaxed)?,
794 "atomic_load_acq" => this.atomic_load(args, dest, AtomicReadOp::Acquire)?,
796 "atomic_store" => this.atomic_store(args, AtomicWriteOp::SeqCst)?,
797 "atomic_store_relaxed" => this.atomic_store(args, AtomicWriteOp::Relaxed)?,
798 "atomic_store_rel" => this.atomic_store(args, AtomicWriteOp::Release)?,
800 "atomic_fence_acq" => this.atomic_fence(args, AtomicFenceOp::Acquire)?,
801 "atomic_fence_rel" => this.atomic_fence(args, AtomicFenceOp::Release)?,
802 "atomic_fence_acqrel" => this.atomic_fence(args, AtomicFenceOp::AcqRel)?,
803 "atomic_fence" => this.atomic_fence(args, AtomicFenceOp::SeqCst)?,
805 "atomic_singlethreadfence_acq" => this.compiler_fence(args, AtomicFenceOp::Acquire)?,
806 "atomic_singlethreadfence_rel" => this.compiler_fence(args, AtomicFenceOp::Release)?,
807 "atomic_singlethreadfence_acqrel" =>
808 this.compiler_fence(args, AtomicFenceOp::AcqRel)?,
809 "atomic_singlethreadfence" => this.compiler_fence(args, AtomicFenceOp::SeqCst)?,
811 "atomic_xchg" => this.atomic_exchange(args, dest, AtomicRwOp::SeqCst)?,
812 "atomic_xchg_acq" => this.atomic_exchange(args, dest, AtomicRwOp::Acquire)?,
813 "atomic_xchg_rel" => this.atomic_exchange(args, dest, AtomicRwOp::Release)?,
814 "atomic_xchg_acqrel" => this.atomic_exchange(args, dest, AtomicRwOp::AcqRel)?,
815 "atomic_xchg_relaxed" => this.atomic_exchange(args, dest, AtomicRwOp::Relaxed)?,
819 this.atomic_compare_exchange(args, dest, AtomicRwOp::SeqCst, AtomicReadOp::SeqCst)?,
821 "atomic_cxchg_acq" =>
822 this.atomic_compare_exchange(args, dest, AtomicRwOp::Acquire, AtomicReadOp::Acquire)?,
824 "atomic_cxchg_rel" =>
825 this.atomic_compare_exchange(args, dest, AtomicRwOp::Release, AtomicReadOp::Relaxed)?,
827 "atomic_cxchg_acqrel" =>
828 this.atomic_compare_exchange(args, dest, AtomicRwOp::AcqRel, AtomicReadOp::Acquire)?,
830 "atomic_cxchg_relaxed" =>
831 this.atomic_compare_exchange(args, dest, AtomicRwOp::Relaxed, AtomicReadOp::Relaxed)?,
833 "atomic_cxchg_acq_failrelaxed" =>
834 this.atomic_compare_exchange(args, dest, AtomicRwOp::Acquire, AtomicReadOp::Relaxed)?,
836 "atomic_cxchg_acqrel_failrelaxed" =>
837 this.atomic_compare_exchange(args, dest, AtomicRwOp::AcqRel, AtomicReadOp::Relaxed)?,
839 "atomic_cxchg_failrelaxed" =>
840 this.atomic_compare_exchange(args, dest, AtomicRwOp::SeqCst, AtomicReadOp::Relaxed)?,
842 "atomic_cxchg_failacq" =>
843 this.atomic_compare_exchange(args, dest, AtomicRwOp::SeqCst, AtomicReadOp::Acquire)?,
846 "atomic_cxchgweak" =>
847 this.atomic_compare_exchange_weak(args, dest, AtomicRwOp::SeqCst, AtomicReadOp::SeqCst)?,
849 "atomic_cxchgweak_acq" =>
850 this.atomic_compare_exchange_weak(args, dest, AtomicRwOp::Acquire, AtomicReadOp::Acquire)?,
852 "atomic_cxchgweak_rel" =>
853 this.atomic_compare_exchange_weak(args, dest, AtomicRwOp::Release, AtomicReadOp::Relaxed)?,
855 "atomic_cxchgweak_acqrel" =>
856 this.atomic_compare_exchange_weak(args, dest, AtomicRwOp::AcqRel, AtomicReadOp::Acquire)?,
858 "atomic_cxchgweak_relaxed" =>
859 this.atomic_compare_exchange_weak(args, dest, AtomicRwOp::Relaxed, AtomicReadOp::Relaxed)?,
861 "atomic_cxchgweak_acq_failrelaxed" =>
862 this.atomic_compare_exchange_weak(args, dest, AtomicRwOp::Acquire, AtomicReadOp::Relaxed)?,
864 "atomic_cxchgweak_acqrel_failrelaxed" =>
865 this.atomic_compare_exchange_weak(args, dest, AtomicRwOp::AcqRel, AtomicReadOp::Relaxed)?,
867 "atomic_cxchgweak_failrelaxed" =>
868 this.atomic_compare_exchange_weak(args, dest, AtomicRwOp::SeqCst, AtomicReadOp::Relaxed)?,
870 "atomic_cxchgweak_failacq" =>
871 this.atomic_compare_exchange_weak(args, dest, AtomicRwOp::SeqCst, AtomicReadOp::Acquire)?,
875 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitOr, false), AtomicRwOp::SeqCst)?,
878 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitOr, false), AtomicRwOp::Acquire)?,
881 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitOr, false), AtomicRwOp::Release)?,
883 "atomic_or_acqrel" =>
884 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitOr, false), AtomicRwOp::AcqRel)?,
886 "atomic_or_relaxed" =>
887 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitOr, false), AtomicRwOp::Relaxed)?,
890 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitXor, false), AtomicRwOp::SeqCst)?,
893 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitXor, false), AtomicRwOp::Acquire)?,
896 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitXor, false), AtomicRwOp::Release)?,
898 "atomic_xor_acqrel" =>
899 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitXor, false), AtomicRwOp::AcqRel)?,
901 "atomic_xor_relaxed" =>
902 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitXor, false), AtomicRwOp::Relaxed)?,
905 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitAnd, false), AtomicRwOp::SeqCst)?,
908 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitAnd, false), AtomicRwOp::Acquire)?,
911 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitAnd, false), AtomicRwOp::Release)?,
913 "atomic_and_acqrel" =>
914 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitAnd, false), AtomicRwOp::AcqRel)?,
916 "atomic_and_relaxed" =>
917 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitAnd, false), AtomicRwOp::Relaxed)?,
920 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitAnd, true), AtomicRwOp::SeqCst)?,
923 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitAnd, true), AtomicRwOp::Acquire)?,
926 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitAnd, true), AtomicRwOp::Release)?,
928 "atomic_nand_acqrel" =>
929 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitAnd, true), AtomicRwOp::AcqRel)?,
931 "atomic_nand_relaxed" =>
932 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitAnd, true), AtomicRwOp::Relaxed)?,
935 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::Add, false), AtomicRwOp::SeqCst)?,
938 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::Add, false), AtomicRwOp::Acquire)?,
941 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::Add, false), AtomicRwOp::Release)?,
943 "atomic_xadd_acqrel" =>
944 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::Add, false), AtomicRwOp::AcqRel)?,
946 "atomic_xadd_relaxed" =>
947 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::Add, false), AtomicRwOp::Relaxed)?,
950 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::Sub, false), AtomicRwOp::SeqCst)?,
953 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::Sub, false), AtomicRwOp::Acquire)?,
956 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::Sub, false), AtomicRwOp::Release)?,
958 "atomic_xsub_acqrel" =>
959 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::Sub, false), AtomicRwOp::AcqRel)?,
961 "atomic_xsub_relaxed" =>
962 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::Sub, false), AtomicRwOp::Relaxed)?,
963 "atomic_min" => this.atomic_op(args, dest, AtomicOp::Min, AtomicRwOp::SeqCst)?,
964 "atomic_min_acq" => this.atomic_op(args, dest, AtomicOp::Min, AtomicRwOp::Acquire)?,
965 "atomic_min_rel" => this.atomic_op(args, dest, AtomicOp::Min, AtomicRwOp::Release)?,
966 "atomic_min_acqrel" => this.atomic_op(args, dest, AtomicOp::Min, AtomicRwOp::AcqRel)?,
967 "atomic_min_relaxed" =>
968 this.atomic_op(args, dest, AtomicOp::Min, AtomicRwOp::Relaxed)?,
969 "atomic_max" => this.atomic_op(args, dest, AtomicOp::Max, AtomicRwOp::SeqCst)?,
970 "atomic_max_acq" => this.atomic_op(args, dest, AtomicOp::Max, AtomicRwOp::Acquire)?,
971 "atomic_max_rel" => this.atomic_op(args, dest, AtomicOp::Max, AtomicRwOp::Release)?,
972 "atomic_max_acqrel" => this.atomic_op(args, dest, AtomicOp::Max, AtomicRwOp::AcqRel)?,
973 "atomic_max_relaxed" =>
974 this.atomic_op(args, dest, AtomicOp::Max, AtomicRwOp::Relaxed)?,
975 "atomic_umin" => this.atomic_op(args, dest, AtomicOp::Min, AtomicRwOp::SeqCst)?,
976 "atomic_umin_acq" => this.atomic_op(args, dest, AtomicOp::Min, AtomicRwOp::Acquire)?,
977 "atomic_umin_rel" => this.atomic_op(args, dest, AtomicOp::Min, AtomicRwOp::Release)?,
978 "atomic_umin_acqrel" =>
979 this.atomic_op(args, dest, AtomicOp::Min, AtomicRwOp::AcqRel)?,
980 "atomic_umin_relaxed" =>
981 this.atomic_op(args, dest, AtomicOp::Min, AtomicRwOp::Relaxed)?,
982 "atomic_umax" => this.atomic_op(args, dest, AtomicOp::Max, AtomicRwOp::SeqCst)?,
983 "atomic_umax_acq" => this.atomic_op(args, dest, AtomicOp::Max, AtomicRwOp::Acquire)?,
984 "atomic_umax_rel" => this.atomic_op(args, dest, AtomicOp::Max, AtomicRwOp::Release)?,
985 "atomic_umax_acqrel" =>
986 this.atomic_op(args, dest, AtomicOp::Max, AtomicRwOp::AcqRel)?,
987 "atomic_umax_relaxed" =>
988 this.atomic_op(args, dest, AtomicOp::Max, AtomicRwOp::Relaxed)?,
992 let &[ref num, ref denom] = check_arg_count(args)?;
993 this.exact_div(&this.read_immediate(num)?, &this.read_immediate(denom)?, dest)?;
996 "try" => return this.handle_try(args, dest, ret),
999 let &[] = check_arg_count(args)?;
1000 // normally this would raise a SIGTRAP, which aborts if no debugger is connected
1001 throw_machine_stop!(TerminationInfo::Abort("Trace/breakpoint trap".to_string()))
1004 name => throw_unsup_format!("unimplemented intrinsic: {}", name),
1007 trace!("{:?}", this.dump_place(**dest));
1008 this.go_to_block(ret);
1014 args: &[OpTy<'tcx, Tag>],
1015 dest: &PlaceTy<'tcx, Tag>,
1016 atomic: AtomicReadOp,
1017 ) -> InterpResult<'tcx> {
1018 let this = self.eval_context_mut();
1020 let &[ref place] = check_arg_count(args)?;
1021 let place = this.deref_operand(place)?;
1023 // make sure it fits into a scalar; otherwise it cannot be atomic
1024 let val = this.read_scalar_atomic(&place, atomic)?;
1026 // Check alignment requirements. Atomics must always be aligned to their size,
1027 // even if the type they wrap would be less aligned (e.g. AtomicU64 on 32bit must
1029 let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
1030 this.memory.check_ptr_access_align(
1034 CheckInAllocMsg::MemoryAccessTest,
1036 // Perform regular access.
1037 this.write_scalar(val, dest)?;
1043 args: &[OpTy<'tcx, Tag>],
1044 atomic: AtomicWriteOp,
1045 ) -> InterpResult<'tcx> {
1046 let this = self.eval_context_mut();
1048 let &[ref place, ref val] = check_arg_count(args)?;
1049 let place = this.deref_operand(place)?;
1050 let val = this.read_scalar(val)?; // make sure it fits into a scalar; otherwise it cannot be atomic
1052 // Check alignment requirements. Atomics must always be aligned to their size,
1053 // even if the type they wrap would be less aligned (e.g. AtomicU64 on 32bit must
1055 let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
1056 this.memory.check_ptr_access_align(
1060 CheckInAllocMsg::MemoryAccessTest,
1063 // Perform atomic store
1064 this.write_scalar_atomic(val, &place, atomic)?;
1070 args: &[OpTy<'tcx, Tag>],
1071 atomic: AtomicFenceOp,
1072 ) -> InterpResult<'tcx> {
1073 let &[] = check_arg_count(args)?;
1075 //FIXME: compiler fences are currently ignored
1081 args: &[OpTy<'tcx, Tag>],
1082 atomic: AtomicFenceOp,
1083 ) -> InterpResult<'tcx> {
1084 let this = self.eval_context_mut();
1085 let &[] = check_arg_count(args)?;
1086 this.validate_atomic_fence(atomic)?;
// Shared implementation of the read-modify-write atomic intrinsics
// (add/sub/and/or/xor/nand/min/max): applies `atomic_op` to the integer at
// `args[0]` with operand `args[1]` and writes the *old* value into `dest`.
// NOTE(review): the `fn` header line is elided from this view.
1092 args: &[OpTy<'tcx, Tag>],
1093 dest: &PlaceTy<'tcx, Tag>,
1094 atomic_op: AtomicOp,
1096 ) -> InterpResult<'tcx> {
1097 let this = self.eval_context_mut();
1099 let &[ref place, ref rhs] = check_arg_count(args)?;
1100 let place = this.deref_operand(place)?;
// RMW atomics are only defined for integer types; a non-integer here is a
// compiler bug, not a user error, hence `bug!` rather than an interp error.
1102 if !place.layout.ty.is_integral() {
1103 bug!("Atomic arithmetic operations only work on integer types");
1105 let rhs = this.read_immediate(rhs)?;
1107 // Check alignment requirements. Atomics must always be aligned to their size,
1108 // even if the type they wrap would be less aligned (e.g. AtomicU64 on 32bit must
1110 let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
1111 this.memory.check_ptr_access_align(
1115 CheckInAllocMsg::MemoryAccessTest,
// Dispatch on the requested operation (match-arm patterns partially elided in
// this view). NOTE(review): the bool passed to `atomic_min_max_scalar` appears
// to select min (`true`) vs max (`false`) — confirm against its definition.
1120 let old = this.atomic_min_max_scalar(&place, rhs, true, atomic)?;
1121 this.write_immediate(*old, &dest)?; // old value is returned
1125 let old = this.atomic_min_max_scalar(&place, rhs, false, atomic)?;
1126 this.write_immediate(*old, &dest)?; // old value is returned
// Plain MIR binop case (see `AtomicOp::MirOp(mir::BinOp, bool)` in this file).
// NOTE(review): `neg` presumably applies a bitwise NOT to the result for
// `fetch_nand`-style ops — confirm in `atomic_op_immediate`.
1129 AtomicOp::MirOp(op, neg) => {
1130 let old = this.atomic_op_immediate(&place, &rhs, op, neg, atomic)?;
1131 this.write_immediate(*old, dest)?; // old value is returned
// Implementation of the `atomic_xchg*` intrinsics: unconditionally swaps `new`
// into the place `args[0]` points to and writes the old value into `dest`.
// NOTE(review): the `fn` header line is elided from this view.
1139 args: &[OpTy<'tcx, Tag>],
1140 dest: &PlaceTy<'tcx, Tag>,
1142 ) -> InterpResult<'tcx> {
1143 let this = self.eval_context_mut();
1145 let &[ref place, ref new] = check_arg_count(args)?;
1146 let place = this.deref_operand(place)?;
1147 let new = this.read_scalar(new)?;
1149 // Check alignment requirements. Atomics must always be aligned to their size,
1150 // even if the type they wrap would be less aligned (e.g. AtomicU64 on 32bit must
1152 let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
1153 this.memory.check_ptr_access_align(
1157 CheckInAllocMsg::MemoryAccessTest,
1160 let old = this.atomic_exchange_scalar(&place, new, atomic)?;
1161 this.write_scalar(old, dest)?; // old value is returned
// Shared backend for `atomic_compare_exchange` and `atomic_compare_exchange_weak`
// (both defined below in this file): compares the value at `args[0]` with
// `args[1]` and, on match, stores `args[2]`. `can_fail_spuriously` is what
// distinguishes the weak (`true`) from the strong (`false`) variant.
// The old value (plus success flag, per cmpxchg semantics — partially elided
// here) is written into `dest`.
1165 fn atomic_compare_exchange_impl(
1167 args: &[OpTy<'tcx, Tag>],
1168 dest: &PlaceTy<'tcx, Tag>,
1169 success: AtomicRwOp,
1171 can_fail_spuriously: bool,
1172 ) -> InterpResult<'tcx> {
1173 let this = self.eval_context_mut();
// Three operands: destination pointer, expected old value, replacement value.
1175 let &[ref place, ref expect_old, ref new] = check_arg_count(args)?;
1176 let place = this.deref_operand(place)?;
1177 let expect_old = this.read_immediate(expect_old)?; // read as immediate for the sake of `binary_op()`
1178 let new = this.read_scalar(new)?;
1180 // Check alignment requirements. Atomics must always be aligned to their size,
1181 // even if the type they wrap would be less aligned (e.g. AtomicU64 on 32bit must
1183 let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
1184 this.memory.check_ptr_access_align(
1188 CheckInAllocMsg::MemoryAccessTest,
// Perform the actual compare-exchange (some call arguments elided in this view).
1191 let old = this.atomic_compare_exchange_scalar(
1197 can_fail_spuriously,
1200 // Return old value.
1201 this.write_immediate(old, dest)?;
// Strong compare-exchange: thin wrapper that forwards to the shared impl with
// `can_fail_spuriously = false` (a strong cmpxchg never fails spuriously).
1205 fn atomic_compare_exchange(
1207 args: &[OpTy<'tcx, Tag>],
1208 dest: &PlaceTy<'tcx, Tag>,
1209 success: AtomicRwOp,
1211 ) -> InterpResult<'tcx> {
1212 self.atomic_compare_exchange_impl(args, dest, success, fail, false)
// Weak compare-exchange: same as the strong variant above but forwards
// `can_fail_spuriously = true`, allowing spurious failures.
1215 fn atomic_compare_exchange_weak(
1217 args: &[OpTy<'tcx, Tag>],
1218 dest: &PlaceTy<'tcx, Tag>,
1219 success: AtomicRwOp,
1221 ) -> InterpResult<'tcx> {
1222 self.atomic_compare_exchange_impl(args, dest, success, fail, true)
// Implements the `float_to_int_unchecked` intrinsic for one float type `F`
// (F32 or F64 via the `rustc_apfloat::Float` trait): truncates toward zero
// and converts to the integer type `dest_ty`, raising an error (error path
// partially elided in this view) when the value is not exactly representable.
1225 fn float_to_int_unchecked<F>(
1228 dest_ty: ty::Ty<'tcx>,
1229 ) -> InterpResult<'tcx, Scalar<Tag>>
1231 F: Float + Into<Scalar<Tag>>,
1233 let this = self.eval_context_ref();
1235 // Step 1: cut off the fractional part of `f`. The result of this is
1236 // guaranteed to be precisely representable in IEEE floats.
1237 let f = f.round_to_integral(Round::TowardZero).value;
1239 // Step 2: Cast the truncated float to the target integer type and see if we lose any information in this step.
1240 Ok(match dest_ty.kind() {
// Unsigned integer target (the `ty::Uint(t)` arm pattern is elided in this view).
1243 let size = Integer::from_uint_ty(this, *t).size();
1244 let res = f.to_u128(size.bits_usize());
1245 if res.status.is_empty() {
1246 // No status flags means there was no further rounding or other loss of precision.
1247 Scalar::from_uint(res.value, size)
1249 // `f` was not representable in this integer type.
// The line below is the format string of the elided error-raising macro call.
1251 "`float_to_int_unchecked` intrinsic called on {} which cannot be represented in target type `{:?}`",
// Signed integer target: same scheme using the signed conversion.
1259 let size = Integer::from_int_ty(this, *t).size();
1260 let res = f.to_i128(size.bits_usize());
1261 if res.status.is_empty() {
1262 // No status flags means there was no further rounding or other loss of precision.
1263 Scalar::from_int(res.value, size)
1265 // `f` was not representable in this integer type.
1267 "`float_to_int_unchecked` intrinsic called on {} which cannot be represented in target type `{:?}`",
// A non-integer target type can only reach here via a compiler bug.
1274 _ => bug!("`float_to_int_unchecked` called with non-int output type {:?}", dest_ty),
// Computes the float maximum of two immediates of the same float type,
// returning the result as a scalar. NOTE(review): the `fn` header line is
// elided from this view; `rustc_apfloat`'s `max` presumably follows IEEE
// maxNum (NaN-ignoring) semantics — confirm against the apfloat docs.
1280 left: &ImmTy<'tcx, Tag>,
1281 right: &ImmTy<'tcx, Tag>,
1282 ) -> InterpResult<'tcx, Scalar<Tag>> {
// Both operands must have the exact same float type.
1283 assert_eq!(left.layout.ty, right.layout.ty);
// Non-float operands indicate a compiler bug, not a user error.
1284 let ty::Float(float_ty) = left.layout.ty.kind() else {
1285 bug!("fmax operand is not a float")
1287 let left = left.to_scalar()?;
1288 let right = right.to_scalar()?;
// Dispatch on F32 vs F64 (the surrounding `match float_ty` lines are elided).
1290 FloatTy::F32 => Scalar::from_f32(left.to_f32()?.max(right.to_f32()?)),
1291 FloatTy::F64 => Scalar::from_f64(left.to_f64()?.max(right.to_f64()?)),
1296 left: &ImmTy<'tcx, Tag>,
1297 right: &ImmTy<'tcx, Tag>,
1298 ) -> InterpResult<'tcx, Scalar<Tag>> {
1299 assert_eq!(left.layout.ty, right.layout.ty);
1300 let ty::Float(float_ty) = left.layout.ty.kind() else {
1301 bug!("fmin operand is not a float")
1303 let left = left.to_scalar()?;
1304 let right = right.to_scalar()?;
1306 FloatTy::F32 => Scalar::from_f32(left.to_f32()?.min(right.to_f32()?)),
1307 FloatTy::F64 => Scalar::from_f64(left.to_f64()?.min(right.to_f64()?)),