5 use rustc_apfloat::{Float, Round};
6 use rustc_middle::ty::layout::{IntegerExt, LayoutOf};
7 use rustc_middle::{mir, mir::BinOp, ty, ty::FloatTy};
8 use rustc_target::abi::{Align, Integer};
11 use helpers::{bool_to_simd_element, check_arg_count, simd_element_to_bool};
14 MirOp(mir::BinOp, bool),
// Marker impl: wires the extension trait onto Miri's evaluation context.
// All behavior lives in the trait's default methods below; this body is intentionally empty.
19 impl<'mir, 'tcx: 'mir> EvalContextExt<'mir, 'tcx> for crate::MiriEvalContext<'mir, 'tcx> {}
// NOTE(review): this chunk is sampled/garbled — each line still carries its original
// file line number and many lines are missing, so braces/arms do not pair up here.
// Code tokens are left byte-identical; only comments were added or repaired.
//
// Extension trait holding Miri's intrinsic shims. The big dispatcher below
// (`call_intrinsic`, whose `fn` line is not visible in this sample) first lets the
// shared CTFE machinery handle the intrinsic via `emulate_intrinsic`, then matches
// on the intrinsic's name to emulate Miri-specific ones: pointer-guarantee queries,
// raw memory ops, floating-point math (via host floats — see FIXMEs), SIMD lanewise
// ops/reductions/casts, and the full family of atomic intrinsics.
20 pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx> {
23 instance: ty::Instance<'tcx>,
24 args: &[OpTy<'tcx, Tag>],
25 ret: Option<(&PlaceTy<'tcx, Tag>, mir::BasicBlock)>,
26 _unwind: StackPopUnwind,
27 ) -> InterpResult<'tcx> {
28 let this = self.eval_context_mut();
// First give the shared interpreter core a chance to handle this intrinsic.
30 if this.emulate_intrinsic(instance, args, ret)? {
34 // All supported intrinsics have a return place.
35 let intrinsic_name = this.tcx.item_name(instance.def_id());
36 let intrinsic_name = intrinsic_name.as_str();
37 let (dest, ret) = match ret {
38 None => throw_unsup_format!("unimplemented (diverging) intrinsic: {}", intrinsic_name),
42 // Then handle terminating intrinsics.
43 match intrinsic_name {
44 // Miri overwriting CTFE intrinsics.
45 "ptr_guaranteed_eq" => {
46 let &[ref left, ref right] = check_arg_count(args)?;
47 let left = this.read_immediate(left)?;
48 let right = this.read_immediate(right)?;
49 this.binop_ignore_overflow(mir::BinOp::Eq, &left, &right, dest)?;
51 "ptr_guaranteed_ne" => {
52 let &[ref left, ref right] = check_arg_count(args)?;
53 let left = this.read_immediate(left)?;
54 let right = this.read_immediate(right)?;
55 this.binop_ignore_overflow(mir::BinOp::Ne, &left, &right, dest)?;
58 // For now, for compatibility with the run-time implementation of this, we just return null.
59 // See <https://github.com/rust-lang/rust/issues/93935>.
60 this.write_null(dest)?;
62 "const_deallocate" => {
66 // Raw memory accesses
// NOTE(review): by the argument shape this arm looks like a volatile load
// (deref the pointer, copy the pointee into the return place) — the name
// line is not visible in this sample; confirm against the full file.
68 let &[ref place] = check_arg_count(args)?;
69 let place = this.deref_operand(place)?;
70 this.copy_op(&place.into(), dest)?;
73 let &[ref place, ref dest] = check_arg_count(args)?;
74 let place = this.deref_operand(place)?;
75 this.copy_op(dest, &place.into())?;
78 "write_bytes" | "volatile_set_memory" => {
79 let &[ref ptr, ref val_byte, ref count] = check_arg_count(args)?;
80 let ty = instance.substs.type_at(0);
81 let ty_layout = this.layout_of(ty)?;
82 let val_byte = this.read_scalar(val_byte)?.to_u8()?;
83 let ptr = this.read_pointer(ptr)?;
84 let count = this.read_scalar(count)?.to_machine_usize(this)?;
// `size * count` can overflow; that is UB for this intrinsic, so report it.
85 let byte_count = ty_layout.size.checked_mul(count, this).ok_or_else(|| {
86 err_ub_format!("overflow computing total size of `{}`", intrinsic_name)
89 .write_bytes(ptr, iter::repeat(val_byte).take(byte_count.bytes() as usize))?;
92 // Floating-point operations
// Unary f32 math intrinsics, computed on the host's f32 (bit-exact round-trip
// through `from_bits`/`to_bits`).
108 let &[ref f] = check_arg_count(args)?;
109 // FIXME: Using host floats.
110 let f = f32::from_bits(this.read_scalar(f)?.to_u32()?);
111 let f = match intrinsic_name {
113 "fabsf32" => f.abs(),
115 "sqrtf32" => f.sqrt(),
117 "exp2f32" => f.exp2(),
119 "log10f32" => f.log10(),
120 "log2f32" => f.log2(),
121 "floorf32" => f.floor(),
122 "ceilf32" => f.ceil(),
123 "truncf32" => f.trunc(),
124 "roundf32" => f.round(),
127 this.write_scalar(Scalar::from_u32(f.to_bits()), dest)?;
// Unary f64 math intrinsics — same scheme as the f32 arm above.
145 let &[ref f] = check_arg_count(args)?;
146 // FIXME: Using host floats.
147 let f = f64::from_bits(this.read_scalar(f)?.to_u64()?);
148 let f = match intrinsic_name {
150 "fabsf64" => f.abs(),
152 "sqrtf64" => f.sqrt(),
154 "exp2f64" => f.exp2(),
156 "log10f64" => f.log10(),
157 "log2f64" => f.log2(),
158 "floorf64" => f.floor(),
159 "ceilf64" => f.ceil(),
160 "truncf64" => f.trunc(),
161 "roundf64" => f.round(),
164 this.write_scalar(Scalar::from_u64(f.to_bits()), dest)?;
// `f*_fast` intrinsics: the arithmetic itself is the MIR binop, but non-finite
// inputs are UB, which is checked explicitly below.
174 let &[ref a, ref b] = check_arg_count(args)?;
175 let a = this.read_immediate(a)?;
176 let b = this.read_immediate(b)?;
177 let op = match intrinsic_name {
178 "fadd_fast" => mir::BinOp::Add,
179 "fsub_fast" => mir::BinOp::Sub,
180 "fmul_fast" => mir::BinOp::Mul,
181 "fdiv_fast" => mir::BinOp::Div,
182 "frem_fast" => mir::BinOp::Rem,
185 let float_finite = |x: ImmTy<'tcx, _>| -> InterpResult<'tcx, bool> {
186 Ok(match x.layout.ty.kind() {
187 ty::Float(FloatTy::F32) => x.to_scalar()?.to_f32()?.is_finite(),
188 ty::Float(FloatTy::F64) => x.to_scalar()?.to_f64()?.is_finite(),
190 "`{}` called with non-float input type {:?}",
196 match (float_finite(a)?, float_finite(b)?) {
197 (false, false) => throw_ub_format!(
198 "`{}` intrinsic called with non-finite value as both parameters",
201 (false, _) => throw_ub_format!(
202 "`{}` intrinsic called with non-finite value as first parameter",
205 (_, false) => throw_ub_format!(
206 "`{}` intrinsic called with non-finite value as second parameter",
211 this.binop_ignore_overflow(op, &a, &b, dest)?;
// Binary f32 intrinsics via rustc_apfloat (softfloat, so deterministic).
219 let &[ref a, ref b] = check_arg_count(args)?;
220 let a = this.read_scalar(a)?.to_f32()?;
221 let b = this.read_scalar(b)?.to_f32()?;
222 let res = match intrinsic_name {
223 "minnumf32" => a.min(b),
224 "maxnumf32" => a.max(b),
225 "copysignf32" => a.copy_sign(b),
228 this.write_scalar(Scalar::from_f32(res), dest)?;
236 let &[ref a, ref b] = check_arg_count(args)?;
237 let a = this.read_scalar(a)?.to_f64()?;
238 let b = this.read_scalar(b)?.to_f64()?;
239 let res = match intrinsic_name {
240 "minnumf64" => a.min(b),
241 "maxnumf64" => a.max(b),
242 "copysignf64" => a.copy_sign(b),
245 this.write_scalar(Scalar::from_f64(res), dest)?;
// powf32 — host float again, see FIXME.
249 let &[ref f, ref f2] = check_arg_count(args)?;
250 // FIXME: Using host floats.
251 let f = f32::from_bits(this.read_scalar(f)?.to_u32()?);
252 let f2 = f32::from_bits(this.read_scalar(f2)?.to_u32()?);
253 this.write_scalar(Scalar::from_u32(f.powf(f2).to_bits()), dest)?;
257 let &[ref f, ref f2] = check_arg_count(args)?;
258 // FIXME: Using host floats.
259 let f = f64::from_bits(this.read_scalar(f)?.to_u64()?);
260 let f2 = f64::from_bits(this.read_scalar(f2)?.to_u64()?);
261 this.write_scalar(Scalar::from_u64(f.powf(f2).to_bits()), dest)?;
// Fused multiply-add via rustc_apfloat (`.value` discards the status flags).
265 let &[ref a, ref b, ref c] = check_arg_count(args)?;
266 let a = this.read_scalar(a)?.to_f32()?;
267 let b = this.read_scalar(b)?.to_f32()?;
268 let c = this.read_scalar(c)?.to_f32()?;
269 let res = a.mul_add(b, c).value;
270 this.write_scalar(Scalar::from_f32(res), dest)?;
274 let &[ref a, ref b, ref c] = check_arg_count(args)?;
275 let a = this.read_scalar(a)?.to_f64()?;
276 let b = this.read_scalar(b)?.to_f64()?;
277 let c = this.read_scalar(c)?.to_f64()?;
278 let res = a.mul_add(b, c).value;
279 this.write_scalar(Scalar::from_f64(res), dest)?;
// powi — integer exponent, host float.
283 let &[ref f, ref i] = check_arg_count(args)?;
284 // FIXME: Using host floats.
285 let f = f32::from_bits(this.read_scalar(f)?.to_u32()?);
286 let i = this.read_scalar(i)?.to_i32()?;
287 this.write_scalar(Scalar::from_u32(f.powi(i).to_bits()), dest)?;
291 let &[ref f, ref i] = check_arg_count(args)?;
292 // FIXME: Using host floats.
293 let f = f64::from_bits(this.read_scalar(f)?.to_u64()?);
294 let i = this.read_scalar(i)?.to_i32()?;
295 this.write_scalar(Scalar::from_u64(f.powi(i).to_bits()), dest)?;
298 "float_to_int_unchecked" => {
299 let &[ref val] = check_arg_count(args)?;
300 let val = this.read_immediate(val)?;
// Dispatch on the source float width; the helper below reports UB for
// unrepresentable values.
302 let res = match val.layout.ty.kind() {
303 ty::Float(FloatTy::F32) =>
304 this.float_to_int_unchecked(val.to_scalar()?.to_f32()?, dest.layout.ty)?,
305 ty::Float(FloatTy::F64) =>
306 this.float_to_int_unchecked(val.to_scalar()?.to_f64()?, dest.layout.ty)?,
309 "`float_to_int_unchecked` called with non-float input type {:?}",
314 this.write_scalar(res, dest)?;
// SIMD lanewise unary ops (simd_neg / simd_fabs): apply per lane.
321 let &[ref op] = check_arg_count(args)?;
322 let (op, op_len) = this.operand_to_simd(op)?;
323 let (dest, dest_len) = this.place_to_simd(dest)?;
325 assert_eq!(dest_len, op_len);
331 let which = match intrinsic_name {
332 "simd_neg" => Op::MirOp(mir::UnOp::Neg),
333 "simd_fabs" => Op::Abs,
337 for i in 0..dest_len {
338 let op = this.read_immediate(&this.mplace_index(&op, i)?.into())?;
339 let dest = this.mplace_index(&dest, i)?;
340 let val = match which {
341 Op::MirOp(mir_op) => this.unary_op(mir_op, &op)?.to_scalar()?,
343 // Works for f32 and f64.
344 let ty::Float(float_ty) = op.layout.ty.kind() else {
345 bug!("simd_fabs operand is not a float")
347 let op = op.to_scalar()?;
349 FloatTy::F32 => Scalar::from_f32(op.to_f32()?.abs()),
350 FloatTy::F64 => Scalar::from_f64(op.to_f64()?.abs()),
354 this.write_scalar(val, &dest.into())?;
// SIMD lanewise binary ops: arithmetic, bit ops, shifts (with extra shift-amount
// UB check), comparisons (producing all-0/all-1 lane masks), fmax/fmin, and
// saturating add/sub.
376 | "simd_saturating_add"
377 | "simd_saturating_sub" => {
380 let &[ref left, ref right] = check_arg_count(args)?;
381 let (left, left_len) = this.operand_to_simd(left)?;
382 let (right, right_len) = this.operand_to_simd(right)?;
383 let (dest, dest_len) = this.place_to_simd(dest)?;
385 assert_eq!(dest_len, left_len);
386 assert_eq!(dest_len, right_len);
394 let which = match intrinsic_name {
395 "simd_add" => Op::MirOp(BinOp::Add),
396 "simd_sub" => Op::MirOp(BinOp::Sub),
397 "simd_mul" => Op::MirOp(BinOp::Mul),
398 "simd_div" => Op::MirOp(BinOp::Div),
399 "simd_rem" => Op::MirOp(BinOp::Rem),
400 "simd_shl" => Op::MirOp(BinOp::Shl),
401 "simd_shr" => Op::MirOp(BinOp::Shr),
402 "simd_and" => Op::MirOp(BinOp::BitAnd),
403 "simd_or" => Op::MirOp(BinOp::BitOr),
404 "simd_xor" => Op::MirOp(BinOp::BitXor),
405 "simd_eq" => Op::MirOp(BinOp::Eq),
406 "simd_ne" => Op::MirOp(BinOp::Ne),
407 "simd_lt" => Op::MirOp(BinOp::Lt),
408 "simd_le" => Op::MirOp(BinOp::Le),
409 "simd_gt" => Op::MirOp(BinOp::Gt),
410 "simd_ge" => Op::MirOp(BinOp::Ge),
411 "simd_fmax" => Op::FMax,
412 "simd_fmin" => Op::FMin,
413 "simd_saturating_add" => Op::SaturatingOp(BinOp::Add),
414 "simd_saturating_sub" => Op::SaturatingOp(BinOp::Sub),
418 for i in 0..dest_len {
419 let left = this.read_immediate(&this.mplace_index(&left, i)?.into())?;
420 let right = this.read_immediate(&this.mplace_index(&right, i)?.into())?;
421 let dest = this.mplace_index(&dest, i)?;
422 let val = match which {
423 Op::MirOp(mir_op) => {
424 let (val, overflowed, ty) = this.overflowing_binary_op(mir_op, &left, &right)?;
425 if matches!(mir_op, BinOp::Shl | BinOp::Shr) {
426 // Shifts have extra UB as SIMD operations that the MIR binop does not have.
427 // See <https://github.com/rust-lang/rust/issues/91237>.
429 let r_val = right.to_scalar()?.to_bits(right.layout.size)?;
430 throw_ub_format!("overflowing shift by {} in `{}` in SIMD lane {}", r_val, intrinsic_name, i);
433 if matches!(mir_op, BinOp::Eq | BinOp::Ne | BinOp::Lt | BinOp::Le | BinOp::Gt | BinOp::Ge) {
434 // Special handling for boolean-returning operations
435 assert_eq!(ty, this.tcx.types.bool);
436 let val = val.to_bool().unwrap();
437 bool_to_simd_element(val, dest.layout.size)
439 assert_ne!(ty, this.tcx.types.bool);
440 assert_eq!(ty, dest.layout.ty);
445 fmax_op(&left, &right)?
448 fmin_op(&left, &right)?
450 Op::SaturatingOp(mir_op) => {
451 this.saturating_arith(mir_op, &left, &right)?
454 this.write_scalar(val, &dest.into())?;
// Unordered SIMD reductions: fold all lanes into a single scalar.
464 | "simd_reduce_min" => {
467 let &[ref op] = check_arg_count(args)?;
468 let (op, op_len) = this.operand_to_simd(op)?;
471 |b| ImmTy::from_scalar(Scalar::from_bool(b), this.machine.layouts.bool);
479 let which = match intrinsic_name {
480 "simd_reduce_and" => Op::MirOp(BinOp::BitAnd),
481 "simd_reduce_or" => Op::MirOp(BinOp::BitOr),
482 "simd_reduce_xor" => Op::MirOp(BinOp::BitXor),
483 "simd_reduce_any" => Op::MirOpBool(BinOp::BitOr),
484 "simd_reduce_all" => Op::MirOpBool(BinOp::BitAnd),
485 "simd_reduce_max" => Op::Max,
486 "simd_reduce_min" => Op::Min,
490 // Initialize with first lane, then proceed with the rest.
491 let mut res = this.read_immediate(&this.mplace_index(&op, 0)?.into())?;
492 if matches!(which, Op::MirOpBool(_)) {
493 // Convert to `bool` scalar.
494 res = imm_from_bool(simd_element_to_bool(res)?);
497 let op = this.read_immediate(&this.mplace_index(&op, i)?.into())?;
499 Op::MirOp(mir_op) => {
500 this.binary_op(mir_op, &res, &op)?
502 Op::MirOpBool(mir_op) => {
503 let op = imm_from_bool(simd_element_to_bool(op)?);
504 this.binary_op(mir_op, &res, &op)?
507 if matches!(res.layout.ty.kind(), ty::Float(_)) {
508 ImmTy::from_scalar(fmax_op(&res, &op)?, res.layout)
510 // Just boring integers, so no NaNs to worry about
511 if this.binary_op(BinOp::Ge, &res, &op)?.to_scalar()?.to_bool()? {
519 if matches!(res.layout.ty.kind(), ty::Float(_)) {
520 ImmTy::from_scalar(fmin_op(&res, &op)?, res.layout)
522 // Just boring integers, so no NaNs to worry about
523 if this.binary_op(BinOp::Le, &res, &op)?.to_scalar()?.to_bool()? {
532 this.write_immediate(*res, dest)?;
// Ordered SIMD reductions: fold lanes left-to-right starting from `init`.
535 | "simd_reduce_add_ordered"
536 | "simd_reduce_mul_ordered" => {
539 let &[ref op, ref init] = check_arg_count(args)?;
540 let (op, op_len) = this.operand_to_simd(op)?;
541 let init = this.read_immediate(init)?;
543 let mir_op = match intrinsic_name {
544 "simd_reduce_add_ordered" => BinOp::Add,
545 "simd_reduce_mul_ordered" => BinOp::Mul,
551 let op = this.read_immediate(&this.mplace_index(&op, i)?.into())?;
552 res = this.binary_op(mir_op, &res, &op)?;
554 this.write_immediate(*res, dest)?;
// simd_select (by the mask/yes/no shape): lanewise select driven by a mask vector.
557 let &[ref mask, ref yes, ref no] = check_arg_count(args)?;
558 let (mask, mask_len) = this.operand_to_simd(mask)?;
559 let (yes, yes_len) = this.operand_to_simd(yes)?;
560 let (no, no_len) = this.operand_to_simd(no)?;
561 let (dest, dest_len) = this.place_to_simd(dest)?;
563 assert_eq!(dest_len, mask_len);
564 assert_eq!(dest_len, yes_len);
565 assert_eq!(dest_len, no_len);
567 for i in 0..dest_len {
568 let mask = this.read_immediate(&this.mplace_index(&mask, i)?.into())?;
569 let yes = this.read_immediate(&this.mplace_index(&yes, i)?.into())?;
570 let no = this.read_immediate(&this.mplace_index(&no, i)?.into())?;
571 let dest = this.mplace_index(&dest, i)?;
573 let mask = simd_element_to_bool(mask)?;
574 let val = if mask { yes } else { no };
575 this.write_immediate(*val, &dest.into())?;
// Lanewise casts. `simd_as` saturates float-to-int ("safe"); `simd_cast`
// routes float-to-int through the UB-checking unchecked conversion.
579 "simd_cast" | "simd_as" => {
580 let &[ref op] = check_arg_count(args)?;
581 let (op, op_len) = this.operand_to_simd(op)?;
582 let (dest, dest_len) = this.place_to_simd(dest)?;
584 assert_eq!(dest_len, op_len);
586 let safe_cast = intrinsic_name == "simd_as";
588 for i in 0..dest_len {
589 let op = this.read_immediate(&this.mplace_index(&op, i)?.into())?;
590 let dest = this.mplace_index(&dest, i)?;
592 let val = match (op.layout.ty.kind(), dest.layout.ty.kind()) {
593 // Int-to-(int|float): always safe
594 (ty::Int(_) | ty::Uint(_), ty::Int(_) | ty::Uint(_) | ty::Float(_)) =>
595 this.misc_cast(&op, dest.layout.ty)?,
596 // Float-to-float: always safe
597 (ty::Float(_), ty::Float(_)) =>
598 this.misc_cast(&op, dest.layout.ty)?,
599 // Float-to-int in safe mode
600 (ty::Float(_), ty::Int(_) | ty::Uint(_)) if safe_cast =>
601 this.misc_cast(&op, dest.layout.ty)?,
602 // Float-to-int in unchecked mode
603 (ty::Float(FloatTy::F32), ty::Int(_) | ty::Uint(_)) if !safe_cast =>
604 this.float_to_int_unchecked(op.to_scalar()?.to_f32()?, dest.layout.ty)?.into(),
605 (ty::Float(FloatTy::F64), ty::Int(_) | ty::Uint(_)) if !safe_cast =>
606 this.float_to_int_unchecked(op.to_scalar()?.to_f64()?, dest.layout.ty)?.into(),
609 "Unsupported SIMD cast from element type {} to {}",
614 this.write_immediate(val, &dest.into())?;
// Atomic operations: each suffix encodes the memory ordering; the suffix-free
// name is SeqCst. All forward to the helper methods defined later in this trait.
619 "atomic_load" => this.atomic_load(args, dest, AtomicReadOp::SeqCst)?,
620 "atomic_load_relaxed" => this.atomic_load(args, dest, AtomicReadOp::Relaxed)?,
621 "atomic_load_acq" => this.atomic_load(args, dest, AtomicReadOp::Acquire)?,
623 "atomic_store" => this.atomic_store(args, AtomicWriteOp::SeqCst)?,
624 "atomic_store_relaxed" => this.atomic_store(args, AtomicWriteOp::Relaxed)?,
625 "atomic_store_rel" => this.atomic_store(args, AtomicWriteOp::Release)?,
627 "atomic_fence_acq" => this.atomic_fence(args, AtomicFenceOp::Acquire)?,
628 "atomic_fence_rel" => this.atomic_fence(args, AtomicFenceOp::Release)?,
629 "atomic_fence_acqrel" => this.atomic_fence(args, AtomicFenceOp::AcqRel)?,
630 "atomic_fence" => this.atomic_fence(args, AtomicFenceOp::SeqCst)?,
632 "atomic_singlethreadfence_acq" => this.compiler_fence(args, AtomicFenceOp::Acquire)?,
633 "atomic_singlethreadfence_rel" => this.compiler_fence(args, AtomicFenceOp::Release)?,
634 "atomic_singlethreadfence_acqrel" =>
635 this.compiler_fence(args, AtomicFenceOp::AcqRel)?,
636 "atomic_singlethreadfence" => this.compiler_fence(args, AtomicFenceOp::SeqCst)?,
638 "atomic_xchg" => this.atomic_exchange(args, dest, AtomicRwOp::SeqCst)?,
639 "atomic_xchg_acq" => this.atomic_exchange(args, dest, AtomicRwOp::Acquire)?,
640 "atomic_xchg_rel" => this.atomic_exchange(args, dest, AtomicRwOp::Release)?,
641 "atomic_xchg_acqrel" => this.atomic_exchange(args, dest, AtomicRwOp::AcqRel)?,
642 "atomic_xchg_relaxed" => this.atomic_exchange(args, dest, AtomicRwOp::Relaxed)?,
// Compare-exchange: first ordering is for success (read-modify-write), second
// for the failure-path load.
646 this.atomic_compare_exchange(args, dest, AtomicRwOp::SeqCst, AtomicReadOp::SeqCst)?,
648 "atomic_cxchg_acq" =>
649 this.atomic_compare_exchange(args, dest, AtomicRwOp::Acquire, AtomicReadOp::Acquire)?,
651 "atomic_cxchg_rel" =>
652 this.atomic_compare_exchange(args, dest, AtomicRwOp::Release, AtomicReadOp::Relaxed)?,
654 "atomic_cxchg_acqrel" =>
655 this.atomic_compare_exchange(args, dest, AtomicRwOp::AcqRel, AtomicReadOp::Acquire)?,
657 "atomic_cxchg_relaxed" =>
658 this.atomic_compare_exchange(args, dest, AtomicRwOp::Relaxed, AtomicReadOp::Relaxed)?,
660 "atomic_cxchg_acq_failrelaxed" =>
661 this.atomic_compare_exchange(args, dest, AtomicRwOp::Acquire, AtomicReadOp::Relaxed)?,
663 "atomic_cxchg_acqrel_failrelaxed" =>
664 this.atomic_compare_exchange(args, dest, AtomicRwOp::AcqRel, AtomicReadOp::Relaxed)?,
666 "atomic_cxchg_failrelaxed" =>
667 this.atomic_compare_exchange(args, dest, AtomicRwOp::SeqCst, AtomicReadOp::Relaxed)?,
669 "atomic_cxchg_failacq" =>
670 this.atomic_compare_exchange(args, dest, AtomicRwOp::SeqCst, AtomicReadOp::Acquire)?,
673 "atomic_cxchgweak" =>
674 this.atomic_compare_exchange_weak(args, dest, AtomicRwOp::SeqCst, AtomicReadOp::SeqCst)?,
676 "atomic_cxchgweak_acq" =>
677 this.atomic_compare_exchange_weak(args, dest, AtomicRwOp::Acquire, AtomicReadOp::Acquire)?,
679 "atomic_cxchgweak_rel" =>
680 this.atomic_compare_exchange_weak(args, dest, AtomicRwOp::Release, AtomicReadOp::Relaxed)?,
682 "atomic_cxchgweak_acqrel" =>
683 this.atomic_compare_exchange_weak(args, dest, AtomicRwOp::AcqRel, AtomicReadOp::Acquire)?,
685 "atomic_cxchgweak_relaxed" =>
686 this.atomic_compare_exchange_weak(args, dest, AtomicRwOp::Relaxed, AtomicReadOp::Relaxed)?,
688 "atomic_cxchgweak_acq_failrelaxed" =>
689 this.atomic_compare_exchange_weak(args, dest, AtomicRwOp::Acquire, AtomicReadOp::Relaxed)?,
691 "atomic_cxchgweak_acqrel_failrelaxed" =>
692 this.atomic_compare_exchange_weak(args, dest, AtomicRwOp::AcqRel, AtomicReadOp::Relaxed)?,
694 "atomic_cxchgweak_failrelaxed" =>
695 this.atomic_compare_exchange_weak(args, dest, AtomicRwOp::SeqCst, AtomicReadOp::Relaxed)?,
697 "atomic_cxchgweak_failacq" =>
698 this.atomic_compare_exchange_weak(args, dest, AtomicRwOp::SeqCst, AtomicReadOp::Acquire)?,
// Fetch-op family: AtomicOp::MirOp(binop, negate_result). The `true` flag on
// BitAnd turns it into NAND.
702 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitOr, false), AtomicRwOp::SeqCst)?,
705 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitOr, false), AtomicRwOp::Acquire)?,
708 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitOr, false), AtomicRwOp::Release)?,
710 "atomic_or_acqrel" =>
711 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitOr, false), AtomicRwOp::AcqRel)?,
713 "atomic_or_relaxed" =>
714 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitOr, false), AtomicRwOp::Relaxed)?,
717 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitXor, false), AtomicRwOp::SeqCst)?,
720 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitXor, false), AtomicRwOp::Acquire)?,
723 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitXor, false), AtomicRwOp::Release)?,
725 "atomic_xor_acqrel" =>
726 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitXor, false), AtomicRwOp::AcqRel)?,
728 "atomic_xor_relaxed" =>
729 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitXor, false), AtomicRwOp::Relaxed)?,
732 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitAnd, false), AtomicRwOp::SeqCst)?,
735 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitAnd, false), AtomicRwOp::Acquire)?,
738 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitAnd, false), AtomicRwOp::Release)?,
740 "atomic_and_acqrel" =>
741 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitAnd, false), AtomicRwOp::AcqRel)?,
743 "atomic_and_relaxed" =>
744 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitAnd, false), AtomicRwOp::Relaxed)?,
747 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitAnd, true), AtomicRwOp::SeqCst)?,
750 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitAnd, true), AtomicRwOp::Acquire)?,
753 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitAnd, true), AtomicRwOp::Release)?,
755 "atomic_nand_acqrel" =>
756 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitAnd, true), AtomicRwOp::AcqRel)?,
758 "atomic_nand_relaxed" =>
759 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitAnd, true), AtomicRwOp::Relaxed)?,
762 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::Add, false), AtomicRwOp::SeqCst)?,
765 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::Add, false), AtomicRwOp::Acquire)?,
768 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::Add, false), AtomicRwOp::Release)?,
770 "atomic_xadd_acqrel" =>
771 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::Add, false), AtomicRwOp::AcqRel)?,
773 "atomic_xadd_relaxed" =>
774 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::Add, false), AtomicRwOp::Relaxed)?,
777 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::Sub, false), AtomicRwOp::SeqCst)?,
780 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::Sub, false), AtomicRwOp::Acquire)?,
783 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::Sub, false), AtomicRwOp::Release)?,
785 "atomic_xsub_acqrel" =>
786 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::Sub, false), AtomicRwOp::AcqRel)?,
788 "atomic_xsub_relaxed" =>
789 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::Sub, false), AtomicRwOp::Relaxed)?,
// NOTE(review): `atomic_umin`/`atomic_umax` map to the same AtomicOp::Min/Max as
// the signed `atomic_min`/`atomic_max` variants — presumably signedness is
// recovered from the operand type inside `atomic_min_max_scalar`; confirm there.
790 "atomic_min" => this.atomic_op(args, dest, AtomicOp::Min, AtomicRwOp::SeqCst)?,
791 "atomic_min_acq" => this.atomic_op(args, dest, AtomicOp::Min, AtomicRwOp::Acquire)?,
792 "atomic_min_rel" => this.atomic_op(args, dest, AtomicOp::Min, AtomicRwOp::Release)?,
793 "atomic_min_acqrel" => this.atomic_op(args, dest, AtomicOp::Min, AtomicRwOp::AcqRel)?,
794 "atomic_min_relaxed" =>
795 this.atomic_op(args, dest, AtomicOp::Min, AtomicRwOp::Relaxed)?,
796 "atomic_max" => this.atomic_op(args, dest, AtomicOp::Max, AtomicRwOp::SeqCst)?,
797 "atomic_max_acq" => this.atomic_op(args, dest, AtomicOp::Max, AtomicRwOp::Acquire)?,
798 "atomic_max_rel" => this.atomic_op(args, dest, AtomicOp::Max, AtomicRwOp::Release)?,
799 "atomic_max_acqrel" => this.atomic_op(args, dest, AtomicOp::Max, AtomicRwOp::AcqRel)?,
800 "atomic_max_relaxed" =>
801 this.atomic_op(args, dest, AtomicOp::Max, AtomicRwOp::Relaxed)?,
802 "atomic_umin" => this.atomic_op(args, dest, AtomicOp::Min, AtomicRwOp::SeqCst)?,
803 "atomic_umin_acq" => this.atomic_op(args, dest, AtomicOp::Min, AtomicRwOp::Acquire)?,
804 "atomic_umin_rel" => this.atomic_op(args, dest, AtomicOp::Min, AtomicRwOp::Release)?,
805 "atomic_umin_acqrel" =>
806 this.atomic_op(args, dest, AtomicOp::Min, AtomicRwOp::AcqRel)?,
807 "atomic_umin_relaxed" =>
808 this.atomic_op(args, dest, AtomicOp::Min, AtomicRwOp::Relaxed)?,
809 "atomic_umax" => this.atomic_op(args, dest, AtomicOp::Max, AtomicRwOp::SeqCst)?,
810 "atomic_umax_acq" => this.atomic_op(args, dest, AtomicOp::Max, AtomicRwOp::Acquire)?,
811 "atomic_umax_rel" => this.atomic_op(args, dest, AtomicOp::Max, AtomicRwOp::Release)?,
812 "atomic_umax_acqrel" =>
813 this.atomic_op(args, dest, AtomicOp::Max, AtomicRwOp::AcqRel)?,
814 "atomic_umax_relaxed" =>
815 this.atomic_op(args, dest, AtomicOp::Max, AtomicRwOp::Relaxed)?,
// Other terminating intrinsics.
819 let &[ref num, ref denom] = check_arg_count(args)?;
820 this.exact_div(&this.read_immediate(num)?, &this.read_immediate(denom)?, dest)?;
// `try` manages its own control flow, hence the early `return`.
823 "try" => return this.handle_try(args, dest, ret),
826 let &[] = check_arg_count(args)?;
827 // normally this would raise a SIGTRAP, which aborts if no debugger is connected
828 throw_machine_stop!(TerminationInfo::Abort("Trace/breakpoint trap".to_string()))
831 name => throw_unsup_format!("unimplemented intrinsic: {}", name),
// Common exit: log the result and continue in the return basic block.
834 trace!("{:?}", this.dump_place(**dest));
835 this.go_to_block(ret);
// atomic_load helper (fn header line not visible in this sample): dereferences
// the single pointer argument, performs an atomic read with the given ordering,
// checks size-alignment, and writes the value to the (non-atomic) return place.
// Lines are sampled; code tokens left byte-identical.
841 args: &[OpTy<'tcx, Tag>],
842 dest: &PlaceTy<'tcx, Tag>,
843 atomic: AtomicReadOp,
844 ) -> InterpResult<'tcx> {
845 let this = self.eval_context_mut();
847 let &[ref place] = check_arg_count(args)?;
848 let place = this.deref_operand(place)?;
850 // make sure it fits into a scalar; otherwise it cannot be atomic
851 let val = this.read_scalar_atomic(&place, atomic)?;
853 // Check alignment requirements. Atomics must always be aligned to their size,
854 // even if the type they wrap would be less aligned (e.g. AtomicU64 on 32bit must
// (comment truncated by sampling — presumably "…must be 8-aligned").
856 let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
857 this.memory.check_ptr_access_align(
861 CheckInAllocMsg::MemoryAccessTest,
863 // Perform regular access.
864 this.write_scalar(val, dest)?;
// atomic_store helper (fn header not visible): takes (place, val), checks
// size-alignment of the target, then performs an atomic write with the given
// ordering. Lines are sampled; code tokens left byte-identical.
870 args: &[OpTy<'tcx, Tag>],
871 atomic: AtomicWriteOp,
872 ) -> InterpResult<'tcx> {
873 let this = self.eval_context_mut();
875 let &[ref place, ref val] = check_arg_count(args)?;
876 let place = this.deref_operand(place)?;
877 let val = this.read_scalar(val)?; // make sure it fits into a scalar; otherwise it cannot be atomic
879 // Check alignment requirements. Atomics must always be aligned to their size,
880 // even if the type they wrap would be less aligned (e.g. AtomicU64 on 32bit must
// (comment truncated by sampling — presumably "…must be 8-aligned").
882 let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
883 this.memory.check_ptr_access_align(
887 CheckInAllocMsg::MemoryAccessTest,
890 // Perform atomic store
891 this.write_scalar_atomic(val, &place, atomic)?;
// compiler_fence helper (fn header not visible): a single-thread fence takes no
// arguments and currently has no effect in Miri (see FIXME below).
897 args: &[OpTy<'tcx, Tag>],
898 atomic: AtomicFenceOp,
899 ) -> InterpResult<'tcx> {
900 let &[] = check_arg_count(args)?;
902 //FIXME: compiler fences are currently ignored
// atomic_fence helper (fn header not visible): no arguments; forwards the
// ordering to the data-race detector via `validate_atomic_fence`.
908 args: &[OpTy<'tcx, Tag>],
909 atomic: AtomicFenceOp,
910 ) -> InterpResult<'tcx> {
911 let this = self.eval_context_mut();
912 let &[] = check_arg_count(args)?;
913 this.validate_atomic_fence(atomic)?;
// atomic_op helper (fn header and the `AtomicOp` parameter line not fully
// visible): implements the fetch-op family. Dereferences (place, rhs), rejects
// non-integer targets, checks size-alignment, then dispatches on the op kind;
// in every case the OLD value is written to `dest`, matching fetch_* semantics.
// Lines are sampled; code tokens left byte-identical.
919 args: &[OpTy<'tcx, Tag>],
920 dest: &PlaceTy<'tcx, Tag>,
923 ) -> InterpResult<'tcx> {
924 let this = self.eval_context_mut();
926 let &[ref place, ref rhs] = check_arg_count(args)?;
927 let place = this.deref_operand(place)?;
929 if !place.layout.ty.is_integral() {
930 bug!("Atomic arithmetic operations only work on integer types");
932 let rhs = this.read_immediate(rhs)?;
934 // Check alignment requirements. Atomics must always be aligned to their size,
935 // even if the type they wrap would be less aligned (e.g. AtomicU64 on 32bit must
937 let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
938 this.memory.check_ptr_access_align(
942 CheckInAllocMsg::MemoryAccessTest,
// NOTE(review): judging by the `true`/`false` third argument below, these two
// sampled arms are AtomicOp::Min and AtomicOp::Max respectively — confirm the
// flag's meaning against `atomic_min_max_scalar`'s signature.
947 let old = this.atomic_min_max_scalar(&place, rhs, true, atomic)?;
948 this.write_immediate(*old, &dest)?; // old value is returned
952 let old = this.atomic_min_max_scalar(&place, rhs, false, atomic)?;
953 this.write_immediate(*old, &dest)?; // old value is returned
956 AtomicOp::MirOp(op, neg) => {
957 let old = this.atomic_op_immediate(&place, &rhs, op, neg, atomic)?;
958 this.write_immediate(*old, dest)?; // old value is returned
// atomic_exchange helper (fn header not visible): swaps `new` into `place`
// atomically with the given ordering and returns the old value via `dest`.
966 args: &[OpTy<'tcx, Tag>],
967 dest: &PlaceTy<'tcx, Tag>,
969 ) -> InterpResult<'tcx> {
970 let this = self.eval_context_mut();
972 let &[ref place, ref new] = check_arg_count(args)?;
973 let place = this.deref_operand(place)?;
974 let new = this.read_scalar(new)?;
976 // Check alignment requirements. Atomics must always be aligned to their size,
977 // even if the type they wrap would be less aligned (e.g. AtomicU64 on 32bit must
979 let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
980 this.memory.check_ptr_access_align(
984 CheckInAllocMsg::MemoryAccessTest,
987 let old = this.atomic_exchange_scalar(&place, new, atomic)?;
988 this.write_scalar(old, dest)?; // old value is returned
// Shared implementation behind both the strong and weak compare-exchange
// shims; `can_fail_spuriously` distinguishes them. Returns (old, success-flag)
// semantics via `atomic_compare_exchange_scalar`, written to `dest`.
// Lines are sampled (success/failure ordering parameters not visible).
992 fn atomic_compare_exchange_impl(
994 args: &[OpTy<'tcx, Tag>],
995 dest: &PlaceTy<'tcx, Tag>,
998 can_fail_spuriously: bool,
999 ) -> InterpResult<'tcx> {
1000 let this = self.eval_context_mut();
1002 let &[ref place, ref expect_old, ref new] = check_arg_count(args)?;
1003 let place = this.deref_operand(place)?;
1004 let expect_old = this.read_immediate(expect_old)?; // read as immediate for the sake of `binary_op()`
1005 let new = this.read_scalar(new)?;
1007 // Check alignment requirements. Atomics must always be aligned to their size,
1008 // even if the type they wrap would be less aligned (e.g. AtomicU64 on 32bit must
1010 let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
1011 this.memory.check_ptr_access_align(
1015 CheckInAllocMsg::MemoryAccessTest,
1018 let old = this.atomic_compare_exchange_scalar(
1024 can_fail_spuriously,
1027 // Return old value.
1028 this.write_immediate(old, dest)?;
// Strong compare-exchange: may not fail spuriously (`false` flag).
1032 fn atomic_compare_exchange(
1034 args: &[OpTy<'tcx, Tag>],
1035 dest: &PlaceTy<'tcx, Tag>,
1036 success: AtomicRwOp,
1038 ) -> InterpResult<'tcx> {
1039 self.atomic_compare_exchange_impl(args, dest, success, fail, false)
// Weak compare-exchange: allowed to fail spuriously (`true` flag).
1042 fn atomic_compare_exchange_weak(
1044 args: &[OpTy<'tcx, Tag>],
1045 dest: &PlaceTy<'tcx, Tag>,
1046 success: AtomicRwOp,
1048 ) -> InterpResult<'tcx> {
1049 self.atomic_compare_exchange_impl(args, dest, success, fail, true)
// Converts a softfloat `f` to the integer type `dest_ty`, reporting UB when the
// value is not exactly representable. Truncates toward zero, then uses the
// rustc_apfloat status flags to detect any loss of information.
// Lines are sampled; code tokens left byte-identical.
1052 fn float_to_int_unchecked<F>(
1055 dest_ty: ty::Ty<'tcx>,
1056 ) -> InterpResult<'tcx, Scalar<Tag>>
1058 F: Float + Into<Scalar<Tag>>,
1060 let this = self.eval_context_ref();
1062 // Step 1: cut off the fractional part of `f`. The result of this is
1063 // guaranteed to be precisely representable in IEEE floats.
1064 let f = f.round_to_integral(Round::TowardZero).value;
1066 // Step 2: Cast the truncated float to the target integer type and see if we lose any information in this step.
1067 Ok(match dest_ty.kind() {
// Unsigned target (the `ty::Uint(t)` pattern line is not visible here).
1070 let size = Integer::from_uint_ty(this, *t).size();
1071 let res = f.to_u128(size.bits_usize());
1072 if res.status.is_empty() {
1073 // No status flags means there was no further rounding or other loss of precision.
1074 Scalar::from_uint(res.value, size)
1076 // `f` was not representable in this integer type.
1078 "`float_to_int_unchecked` intrinsic called on {} which cannot be represented in target type `{:?}`",
// Signed target (the `ty::Int(t)` pattern line is not visible here).
1086 let size = Integer::from_int_ty(this, *t).size();
1087 let res = f.to_i128(size.bits_usize());
1088 if res.status.is_empty() {
1089 // No status flags means there was no further rounding or other loss of precision.
1090 Scalar::from_int(res.value, size)
1092 // `f` was not representable in this integer type.
1094 "`float_to_int_unchecked` intrinsic called on {} which cannot be represented in target type `{:?}`",
1101 _ => bug!("`float_to_int_unchecked` called with non-int output type {:?}", dest_ty),
// fmax helper for SIMD/reduction arms (fn header not visible): both operands
// must be the same float type; delegates to rustc_apfloat's `max`.
1107 left: &ImmTy<'tcx, Tag>,
1108 right: &ImmTy<'tcx, Tag>,
1109 ) -> InterpResult<'tcx, Scalar<Tag>> {
1110 assert_eq!(left.layout.ty, right.layout.ty);
1111 let ty::Float(float_ty) = left.layout.ty.kind() else {
1112 bug!("fmax operand is not a float")
1114 let left = left.to_scalar()?;
1115 let right = right.to_scalar()?;
1117 FloatTy::F32 => Scalar::from_f32(left.to_f32()?.max(right.to_f32()?)),
1118 FloatTy::F64 => Scalar::from_f64(left.to_f64()?.max(right.to_f64()?)),
// fmin helper, mirror image of fmax_op above: same-type float operands,
// delegates to rustc_apfloat's `min`.
1123 left: &ImmTy<'tcx, Tag>,
1124 right: &ImmTy<'tcx, Tag>,
1125 ) -> InterpResult<'tcx, Scalar<Tag>> {
1126 assert_eq!(left.layout.ty, right.layout.ty);
1127 let ty::Float(float_ty) = left.layout.ty.kind() else {
1128 bug!("fmin operand is not a float")
1130 let left = left.to_scalar()?;
1131 let right = right.to_scalar()?;
1133 FloatTy::F32 => Scalar::from_f32(left.to_f32()?.min(right.to_f32()?)),
1134 FloatTy::F64 => Scalar::from_f64(left.to_f64()?.min(right.to_f64()?)),