5 use rustc_apfloat::{Float, Round};
6 use rustc_middle::ty::layout::{IntegerExt, LayoutOf};
7 use rustc_middle::{mir, mir::BinOp, ty, ty::FloatTy};
8 use rustc_target::abi::{Align, Integer};
11 use helpers::check_arg_count;
14 MirOp(mir::BinOp, bool),
// Blanket hook-up: makes every `MiriEvalContext` carry the intrinsic helpers
// declared on the `EvalContextExt` trait below. The impl body is intentionally
// empty — all methods have default implementations in the trait.
19 impl<'mir, 'tcx: 'mir> EvalContextExt<'mir, 'tcx> for crate::MiriEvalContext<'mir, 'tcx> {}
20 pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx> {
// Entry point for intrinsic calls Miri intercepts.
// Parameters (visible here; the `fn` line itself is outside this excerpt):
//   `instance` — the monomorphized intrinsic being called,
//   `args`     — evaluated operands,
//   `ret`      — return place + basic block to continue at, or `None` for
//                diverging intrinsics,
//   `_unwind`  — unwind action (unused: these intrinsics do not unwind here).
23 instance: ty::Instance<'tcx>,
24 args: &[OpTy<'tcx, Tag>],
25 ret: Option<(&PlaceTy<'tcx, Tag>, mir::BasicBlock)>,
26 _unwind: StackPopUnwind,
27 ) -> InterpResult<'tcx> {
28 let this = self.eval_context_mut();
// First delegate to the shared CTFE intrinsic emulation; if that handled the
// call we are done (early-return presumably follows on an elided line).
30 if this.emulate_intrinsic(instance, args, ret)? {
34 // All supported intrinsics have a return place.
35 let intrinsic_name = &*this.tcx.item_name(instance.def_id()).as_str();
// Unpack the return place and continuation block; diverging intrinsics
// (no return place) are rejected as unsupported.
36 let (dest, ret) = match ret {
37 None => throw_unsup_format!("unimplemented (diverging) intrinsic: {}", intrinsic_name),
41 // Then handle terminating intrinsics.
42 match intrinsic_name {
43 // Miri overwriting CTFE intrinsics.
// At runtime pointer (in)equality is always decidable, so these just perform
// the comparison (CTFE has to be conservative; Miri does not).
44 "ptr_guaranteed_eq" => {
45 let &[ref left, ref right] = check_arg_count(args)?;
46 let left = this.read_immediate(left)?;
47 let right = this.read_immediate(right)?;
48 this.binop_ignore_overflow(mir::BinOp::Eq, &left, &right, dest)?;
50 "ptr_guaranteed_ne" => {
51 let &[ref left, ref right] = check_arg_count(args)?;
52 let left = this.read_immediate(left)?;
53 let right = this.read_immediate(right)?;
54 this.binop_ignore_overflow(mir::BinOp::Ne, &left, &right, dest)?;
57 // Raw memory accesses
// Volatile load: dereference the pointer operand and copy the pointee into
// the return place. (Miri treats volatile accesses like ordinary ones.)
59 let &[ref place] = check_arg_count(args)?;
60 let place = this.deref_operand(place)?;
61 this.copy_op(&place.into(), dest)?;
// Volatile store: note the local `dest` here is the *second argument*
// (the store destination), shadowing the intrinsic's return place.
64 let &[ref place, ref dest] = check_arg_count(args)?;
65 let place = this.deref_operand(place)?;
66 this.copy_op(dest, &place.into())?;
69 "write_bytes" | "volatile_set_memory" => {
70 let &[ref ptr, ref val_byte, ref count] = check_arg_count(args)?;
71 let ty = instance.substs.type_at(0);
72 let ty_layout = this.layout_of(ty)?;
73 let val_byte = this.read_scalar(val_byte)?.to_u8()?;
74 let ptr = this.read_pointer(ptr)?;
75 let count = this.read_scalar(count)?.to_machine_usize(this)?;
// `size_of::<T>() * count` may overflow; that is UB per the intrinsic's
// contract, so report it instead of wrapping.
76 let byte_count = ty_layout.size.checked_mul(count, this).ok_or_else(|| {
77 err_ub_format!("overflow computing total size of `{}`", intrinsic_name)
// Fill the region with `count * size_of::<T>()` repetitions of the byte.
80 .write_bytes(ptr, iter::repeat(val_byte).take(byte_count.bytes() as usize))?;
83 // Floating-point operations
// Unary f32 math intrinsics: read the argument as raw bits, reinterpret as a
// host `f32`, apply the host operation, and write the result back bit-for-bit.
99 let &[ref f] = check_arg_count(args)?;
100 // FIXME: Using host floats.
101 let f = f32::from_bits(this.read_scalar(f)?.to_u32()?);
102 let f = match intrinsic_name {
104 "fabsf32" => f.abs(),
106 "sqrtf32" => f.sqrt(),
108 "exp2f32" => f.exp2(),
110 "log10f32" => f.log10(),
111 "log2f32" => f.log2(),
112 "floorf32" => f.floor(),
113 "ceilf32" => f.ceil(),
114 "truncf32" => f.trunc(),
115 "roundf32" => f.round(),
118 this.write_scalar(Scalar::from_u32(f.to_bits()), dest)?;
// Same set of unary operations for f64, via host `f64`.
136 let &[ref f] = check_arg_count(args)?;
137 // FIXME: Using host floats.
138 let f = f64::from_bits(this.read_scalar(f)?.to_u64()?);
139 let f = match intrinsic_name {
141 "fabsf64" => f.abs(),
143 "sqrtf64" => f.sqrt(),
145 "exp2f64" => f.exp2(),
147 "log10f64" => f.log10(),
148 "log2f64" => f.log2(),
149 "floorf64" => f.floor(),
150 "ceilf64" => f.ceil(),
151 "truncf64" => f.trunc(),
152 "roundf64" => f.round(),
155 this.write_scalar(Scalar::from_u64(f.to_bits()), dest)?;
// `f{add,sub,mul,div,rem}_fast`: plain binary float ops, but with extra UB —
// both operands must be finite (the "fast-math" contract). Miri checks that
// explicitly before performing the MIR binop.
165 let &[ref a, ref b] = check_arg_count(args)?;
166 let a = this.read_immediate(a)?;
167 let b = this.read_immediate(b)?;
168 let op = match intrinsic_name {
169 "fadd_fast" => mir::BinOp::Add,
170 "fsub_fast" => mir::BinOp::Sub,
171 "fmul_fast" => mir::BinOp::Mul,
172 "fdiv_fast" => mir::BinOp::Div,
173 "frem_fast" => mir::BinOp::Rem,
// Helper: is this float operand finite (not NaN and not +/- infinity)?
// Uses soft-float (`rustc_apfloat`) `is_finite`, not host floats.
176 let float_finite = |x: ImmTy<'tcx, _>| -> InterpResult<'tcx, bool> {
177 Ok(match x.layout.ty.kind() {
178 ty::Float(FloatTy::F32) => x.to_scalar()?.to_f32()?.is_finite(),
179 ty::Float(FloatTy::F64) => x.to_scalar()?.to_f64()?.is_finite(),
181 "`{}` called with non-float input type {:?}",
// Report precisely which operand(s) violated the finiteness requirement,
// most specific case (both) first.
187 match (float_finite(a)?, float_finite(b)?) {
188 (false, false) => throw_ub_format!(
189 "`{}` intrinsic called with non-finite value as both parameters",
192 (false, _) => throw_ub_format!(
193 "`{}` intrinsic called with non-finite value as first parameter",
196 (_, false) => throw_ub_format!(
197 "`{}` intrinsic called with non-finite value as second parameter",
// Inputs validated; perform the actual operation.
202 this.binop_ignore_overflow(op, &a, &b, dest)?;
// minnum/maxnum/copysign for f32, computed with soft floats (`rustc_apfloat`),
// so host FPU quirks cannot leak into the result.
210 let &[ref a, ref b] = check_arg_count(args)?;
211 let a = this.read_scalar(a)?.to_f32()?;
212 let b = this.read_scalar(b)?.to_f32()?;
213 let res = match intrinsic_name {
214 "minnumf32" => a.min(b),
215 "maxnumf32" => a.max(b),
216 "copysignf32" => a.copy_sign(b),
219 this.write_scalar(Scalar::from_f32(res), dest)?;
// Same three operations for f64.
227 let &[ref a, ref b] = check_arg_count(args)?;
228 let a = this.read_scalar(a)?.to_f64()?;
229 let b = this.read_scalar(b)?.to_f64()?;
230 let res = match intrinsic_name {
231 "minnumf64" => a.min(b),
232 "maxnumf64" => a.max(b),
233 "copysignf64" => a.copy_sign(b),
236 this.write_scalar(Scalar::from_f64(res), dest)?;
// powf (f32): float exponent, computed with host floats.
240 let &[ref f, ref f2] = check_arg_count(args)?;
241 // FIXME: Using host floats.
242 let f = f32::from_bits(this.read_scalar(f)?.to_u32()?);
243 let f2 = f32::from_bits(this.read_scalar(f2)?.to_u32()?);
244 this.write_scalar(Scalar::from_u32(f.powf(f2).to_bits()), dest)?;
// powf (f64): float exponent, computed with host floats.
248 let &[ref f, ref f2] = check_arg_count(args)?;
249 // FIXME: Using host floats.
250 let f = f64::from_bits(this.read_scalar(f)?.to_u64()?);
251 let f2 = f64::from_bits(this.read_scalar(f2)?.to_u64()?);
252 this.write_scalar(Scalar::from_u64(f.powf(f2).to_bits()), dest)?;
// Fused multiply-add (f32), via soft-float `mul_add`: a*b + c with a single
// rounding step; `.value` discards the apfloat status flags.
256 let &[ref a, ref b, ref c] = check_arg_count(args)?;
257 let a = this.read_scalar(a)?.to_f32()?;
258 let b = this.read_scalar(b)?.to_f32()?;
259 let c = this.read_scalar(c)?.to_f32()?;
260 let res = a.mul_add(b, c).value;
261 this.write_scalar(Scalar::from_f32(res), dest)?;
// Fused multiply-add (f64).
265 let &[ref a, ref b, ref c] = check_arg_count(args)?;
266 let a = this.read_scalar(a)?.to_f64()?;
267 let b = this.read_scalar(b)?.to_f64()?;
268 let c = this.read_scalar(c)?.to_f64()?;
269 let res = a.mul_add(b, c).value;
270 this.write_scalar(Scalar::from_f64(res), dest)?;
// powi (f32): i32 exponent, computed with host floats.
274 let &[ref f, ref i] = check_arg_count(args)?;
275 // FIXME: Using host floats.
276 let f = f32::from_bits(this.read_scalar(f)?.to_u32()?);
277 let i = this.read_scalar(i)?.to_i32()?;
278 this.write_scalar(Scalar::from_u32(f.powi(i).to_bits()), dest)?;
// powi (f64): i32 exponent, computed with host floats.
282 let &[ref f, ref i] = check_arg_count(args)?;
283 // FIXME: Using host floats.
284 let f = f64::from_bits(this.read_scalar(f)?.to_u64()?);
285 let i = this.read_scalar(i)?.to_i32()?;
286 this.write_scalar(Scalar::from_u64(f.powi(i).to_bits()), dest)?;
289 "float_to_int_unchecked" => {
290 let &[ref val] = check_arg_count(args)?;
291 let val = this.read_immediate(val)?;
// Dispatch on the source float width; the `float_to_int_unchecked` helper
// (defined later in this trait) raises UB if the value is not exactly
// representable in the destination integer type.
293 let res = match val.layout.ty.kind() {
294 ty::Float(FloatTy::F32) =>
295 this.float_to_int_unchecked(val.to_scalar()?.to_f32()?, dest.layout.ty)?,
296 ty::Float(FloatTy::F64) =>
297 this.float_to_int_unchecked(val.to_scalar()?.to_f64()?, dest.layout.ty)?,
300 "`float_to_int_unchecked` called with non-float input type {:?}",
305 this.write_scalar(res, dest)?;
// SIMD element-wise binary operations: decompose both operand vectors and the
// destination into lanes, apply the MIR binop per lane, and write each result.
317 let &[ref left, ref right] = check_arg_count(args)?;
318 let (left, left_len) = this.operand_to_simd(left)?;
319 let (right, right_len) = this.operand_to_simd(right)?;
320 let (dest, dest_len) = this.place_to_simd(dest)?;
// All three vectors must have the same lane count (guaranteed by typeck;
// asserted here as an internal invariant).
322 assert_eq!(dest_len, left_len);
323 assert_eq!(dest_len, right_len);
325 let op = match intrinsic_name {
326 "simd_add" => mir::BinOp::Add,
327 "simd_sub" => mir::BinOp::Sub,
328 "simd_mul" => mir::BinOp::Mul,
329 "simd_div" => mir::BinOp::Div,
330 "simd_rem" => mir::BinOp::Rem,
331 "simd_shl" => mir::BinOp::Shl,
332 "simd_shr" => mir::BinOp::Shr,
336 for i in 0..dest_len {
337 let left = this.read_immediate(&this.mplace_index(&left, i)?.into())?;
338 let right = this.read_immediate(&this.mplace_index(&right, i)?.into())?;
339 let dest = this.mplace_index(&dest, i)?;
340 let (val, overflowed, ty) = this.overflowing_binary_op(op, &left, &right)?;
341 assert_eq!(ty, dest.layout.ty);
342 if matches!(op, mir::BinOp::Shl | mir::BinOp::Shr) {
343 // Shifts have extra UB as SIMD operations that the MIR binop does not have.
344 // See <https://github.com/rust-lang/rust/issues/91237>.
// An out-of-range shift amount (detected via `overflowed`, presumably on an
// elided line) is reported with the offending value and lane index.
346 let r_val = right.to_scalar()?.to_bits(right.layout.size)?;
347 throw_ub_format!("overflowing shift by {} in `{}` in SIMD lane {}", r_val, intrinsic_name, i);
350 this.write_scalar(val, &dest.into())?;
// Atomic operations. The intrinsic-name suffix encodes the memory ordering
// (`_acq` = Acquire, `_rel` = Release, `_acqrel` = AcqRel, `_relaxed` =
// Relaxed, no suffix = SeqCst); each arm forwards to a helper defined later
// in this trait with the corresponding ordering enum value.
355 "atomic_load" => this.atomic_load(args, dest, AtomicReadOp::SeqCst)?,
356 "atomic_load_relaxed" => this.atomic_load(args, dest, AtomicReadOp::Relaxed)?,
357 "atomic_load_acq" => this.atomic_load(args, dest, AtomicReadOp::Acquire)?,
// Atomic stores (note: no `dest` — stores have no meaningful return value).
359 "atomic_store" => this.atomic_store(args, AtomicWriteOp::SeqCst)?,
360 "atomic_store_relaxed" => this.atomic_store(args, AtomicWriteOp::Relaxed)?,
361 "atomic_store_rel" => this.atomic_store(args, AtomicWriteOp::Release)?,
// Cross-thread fences.
363 "atomic_fence_acq" => this.atomic_fence(args, AtomicFenceOp::Acquire)?,
364 "atomic_fence_rel" => this.atomic_fence(args, AtomicFenceOp::Release)?,
365 "atomic_fence_acqrel" => this.atomic_fence(args, AtomicFenceOp::AcqRel)?,
366 "atomic_fence" => this.atomic_fence(args, AtomicFenceOp::SeqCst)?,
// Single-thread ("compiler") fences — currently no-ops in Miri.
368 "atomic_singlethreadfence_acq" => this.compiler_fence(args, AtomicFenceOp::Acquire)?,
369 "atomic_singlethreadfence_rel" => this.compiler_fence(args, AtomicFenceOp::Release)?,
370 "atomic_singlethreadfence_acqrel" =>
371 this.compiler_fence(args, AtomicFenceOp::AcqRel)?,
372 "atomic_singlethreadfence" => this.compiler_fence(args, AtomicFenceOp::SeqCst)?,
// Atomic swap: store a new value, return the old one.
374 "atomic_xchg" => this.atomic_exchange(args, dest, AtomicRwOp::SeqCst)?,
375 "atomic_xchg_acq" => this.atomic_exchange(args, dest, AtomicRwOp::Acquire)?,
376 "atomic_xchg_rel" => this.atomic_exchange(args, dest, AtomicRwOp::Release)?,
377 "atomic_xchg_acqrel" => this.atomic_exchange(args, dest, AtomicRwOp::AcqRel)?,
378 "atomic_xchg_relaxed" => this.atomic_exchange(args, dest, AtomicRwOp::Relaxed)?,
// Strong compare-exchange: (success ordering, failure ordering) pairs follow
// the usual rule that the failure ordering is a load ordering no stronger
// than the success ordering (e.g. Release on success -> Relaxed on failure).
382 this.atomic_compare_exchange(args, dest, AtomicRwOp::SeqCst, AtomicReadOp::SeqCst)?,
384 "atomic_cxchg_acq" =>
385 this.atomic_compare_exchange(args, dest, AtomicRwOp::Acquire, AtomicReadOp::Acquire)?,
387 "atomic_cxchg_rel" =>
388 this.atomic_compare_exchange(args, dest, AtomicRwOp::Release, AtomicReadOp::Relaxed)?,
390 "atomic_cxchg_acqrel" =>
391 this.atomic_compare_exchange(args, dest, AtomicRwOp::AcqRel, AtomicReadOp::Acquire)?,
393 "atomic_cxchg_relaxed" =>
394 this.atomic_compare_exchange(args, dest, AtomicRwOp::Relaxed, AtomicReadOp::Relaxed)?,
396 "atomic_cxchg_acq_failrelaxed" =>
397 this.atomic_compare_exchange(args, dest, AtomicRwOp::Acquire, AtomicReadOp::Relaxed)?,
399 "atomic_cxchg_acqrel_failrelaxed" =>
400 this.atomic_compare_exchange(args, dest, AtomicRwOp::AcqRel, AtomicReadOp::Relaxed)?,
402 "atomic_cxchg_failrelaxed" =>
403 this.atomic_compare_exchange(args, dest, AtomicRwOp::SeqCst, AtomicReadOp::Relaxed)?,
405 "atomic_cxchg_failacq" =>
406 this.atomic_compare_exchange(args, dest, AtomicRwOp::SeqCst, AtomicReadOp::Acquire)?,
// Weak compare-exchange: same ordering table, but the operation may fail
// spuriously (see `atomic_compare_exchange_impl`'s `can_fail_spuriously`).
409 "atomic_cxchgweak" =>
410 this.atomic_compare_exchange_weak(args, dest, AtomicRwOp::SeqCst, AtomicReadOp::SeqCst)?,
412 "atomic_cxchgweak_acq" =>
413 this.atomic_compare_exchange_weak(args, dest, AtomicRwOp::Acquire, AtomicReadOp::Acquire)?,
415 "atomic_cxchgweak_rel" =>
416 this.atomic_compare_exchange_weak(args, dest, AtomicRwOp::Release, AtomicReadOp::Relaxed)?,
418 "atomic_cxchgweak_acqrel" =>
419 this.atomic_compare_exchange_weak(args, dest, AtomicRwOp::AcqRel, AtomicReadOp::Acquire)?,
421 "atomic_cxchgweak_relaxed" =>
422 this.atomic_compare_exchange_weak(args, dest, AtomicRwOp::Relaxed, AtomicReadOp::Relaxed)?,
424 "atomic_cxchgweak_acq_failrelaxed" =>
425 this.atomic_compare_exchange_weak(args, dest, AtomicRwOp::Acquire, AtomicReadOp::Relaxed)?,
427 "atomic_cxchgweak_acqrel_failrelaxed" =>
428 this.atomic_compare_exchange_weak(args, dest, AtomicRwOp::AcqRel, AtomicReadOp::Relaxed)?,
430 "atomic_cxchgweak_failrelaxed" =>
431 this.atomic_compare_exchange_weak(args, dest, AtomicRwOp::SeqCst, AtomicReadOp::Relaxed)?,
433 "atomic_cxchgweak_failacq" =>
434 this.atomic_compare_exchange_weak(args, dest, AtomicRwOp::SeqCst, AtomicReadOp::Acquire)?,
// Fetch-and-<op> family: `AtomicOp::MirOp(op, neg)` applies the MIR binop;
// the bool flag negates the result (used only by `nand` = NOT(a AND b)).
438 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitOr, false), AtomicRwOp::SeqCst)?,
441 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitOr, false), AtomicRwOp::Acquire)?,
444 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitOr, false), AtomicRwOp::Release)?,
446 "atomic_or_acqrel" =>
447 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitOr, false), AtomicRwOp::AcqRel)?,
449 "atomic_or_relaxed" =>
450 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitOr, false), AtomicRwOp::Relaxed)?,
453 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitXor, false), AtomicRwOp::SeqCst)?,
456 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitXor, false), AtomicRwOp::Acquire)?,
459 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitXor, false), AtomicRwOp::Release)?,
461 "atomic_xor_acqrel" =>
462 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitXor, false), AtomicRwOp::AcqRel)?,
464 "atomic_xor_relaxed" =>
465 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitXor, false), AtomicRwOp::Relaxed)?,
468 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitAnd, false), AtomicRwOp::SeqCst)?,
471 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitAnd, false), AtomicRwOp::Acquire)?,
474 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitAnd, false), AtomicRwOp::Release)?,
476 "atomic_and_acqrel" =>
477 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitAnd, false), AtomicRwOp::AcqRel)?,
479 "atomic_and_relaxed" =>
480 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitAnd, false), AtomicRwOp::Relaxed)?,
// nand = BitAnd with the negate flag set.
483 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitAnd, true), AtomicRwOp::SeqCst)?,
486 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitAnd, true), AtomicRwOp::Acquire)?,
489 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitAnd, true), AtomicRwOp::Release)?,
491 "atomic_nand_acqrel" =>
492 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitAnd, true), AtomicRwOp::AcqRel)?,
494 "atomic_nand_relaxed" =>
495 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitAnd, true), AtomicRwOp::Relaxed)?,
498 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::Add, false), AtomicRwOp::SeqCst)?,
501 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::Add, false), AtomicRwOp::Acquire)?,
504 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::Add, false), AtomicRwOp::Release)?,
506 "atomic_xadd_acqrel" =>
507 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::Add, false), AtomicRwOp::AcqRel)?,
509 "atomic_xadd_relaxed" =>
510 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::Add, false), AtomicRwOp::Relaxed)?,
513 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::Sub, false), AtomicRwOp::SeqCst)?,
516 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::Sub, false), AtomicRwOp::Acquire)?,
519 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::Sub, false), AtomicRwOp::Release)?,
521 "atomic_xsub_acqrel" =>
522 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::Sub, false), AtomicRwOp::AcqRel)?,
524 "atomic_xsub_relaxed" =>
525 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::Sub, false), AtomicRwOp::Relaxed)?,
// min/max and umin/umax both map to `AtomicOp::Min`/`Max`; presumably the
// helper picks signed vs. unsigned comparison from the operand's integer
// type (signedness is not encoded here) — confirm in `atomic_min_max_scalar`.
526 "atomic_min" => this.atomic_op(args, dest, AtomicOp::Min, AtomicRwOp::SeqCst)?,
527 "atomic_min_acq" => this.atomic_op(args, dest, AtomicOp::Min, AtomicRwOp::Acquire)?,
528 "atomic_min_rel" => this.atomic_op(args, dest, AtomicOp::Min, AtomicRwOp::Release)?,
529 "atomic_min_acqrel" => this.atomic_op(args, dest, AtomicOp::Min, AtomicRwOp::AcqRel)?,
530 "atomic_min_relaxed" =>
531 this.atomic_op(args, dest, AtomicOp::Min, AtomicRwOp::Relaxed)?,
532 "atomic_max" => this.atomic_op(args, dest, AtomicOp::Max, AtomicRwOp::SeqCst)?,
533 "atomic_max_acq" => this.atomic_op(args, dest, AtomicOp::Max, AtomicRwOp::Acquire)?,
534 "atomic_max_rel" => this.atomic_op(args, dest, AtomicOp::Max, AtomicRwOp::Release)?,
535 "atomic_max_acqrel" => this.atomic_op(args, dest, AtomicOp::Max, AtomicRwOp::AcqRel)?,
536 "atomic_max_relaxed" =>
537 this.atomic_op(args, dest, AtomicOp::Max, AtomicRwOp::Relaxed)?,
538 "atomic_umin" => this.atomic_op(args, dest, AtomicOp::Min, AtomicRwOp::SeqCst)?,
539 "atomic_umin_acq" => this.atomic_op(args, dest, AtomicOp::Min, AtomicRwOp::Acquire)?,
540 "atomic_umin_rel" => this.atomic_op(args, dest, AtomicOp::Min, AtomicRwOp::Release)?,
541 "atomic_umin_acqrel" =>
542 this.atomic_op(args, dest, AtomicOp::Min, AtomicRwOp::AcqRel)?,
543 "atomic_umin_relaxed" =>
544 this.atomic_op(args, dest, AtomicOp::Min, AtomicRwOp::Relaxed)?,
545 "atomic_umax" => this.atomic_op(args, dest, AtomicOp::Max, AtomicRwOp::SeqCst)?,
546 "atomic_umax_acq" => this.atomic_op(args, dest, AtomicOp::Max, AtomicRwOp::Acquire)?,
547 "atomic_umax_rel" => this.atomic_op(args, dest, AtomicOp::Max, AtomicRwOp::Release)?,
548 "atomic_umax_acqrel" =>
549 this.atomic_op(args, dest, AtomicOp::Max, AtomicRwOp::AcqRel)?,
550 "atomic_umax_relaxed" =>
551 this.atomic_op(args, dest, AtomicOp::Max, AtomicRwOp::Relaxed)?,
// exact_div: division that is UB if it has a remainder or overflows; the
// checking lives in the shared `exact_div` helper.
555 let &[ref num, ref denom] = check_arg_count(args)?;
556 this.exact_div(&this.read_immediate(num)?, &this.read_immediate(denom)?, dest)?;
// `try` does its own control-flow management (catching panics), so it
// returns early instead of falling through to the common epilogue below.
559 "try" => return this.handle_try(args, dest, ret),
562 let &[] = check_arg_count(args)?;
563 // normally this would raise a SIGTRAP, which aborts if no debugger is connected
564 throw_machine_stop!(TerminationInfo::Abort("Trace/breakpoint trap".to_string()))
// Anything not matched above is simply unsupported.
567 name => throw_unsup_format!("unimplemented intrinsic: {}", name),
// Common epilogue for all non-diverging intrinsics: trace the result and
// continue execution at the return basic block.
570 trace!("{:?}", this.dump_place(**dest));
571 this.go_to_block(ret);
// `atomic_load*` helper: atomically read `*place` with ordering `atomic` and
// write the value to `dest`. (The `fn atomic_load(` line is outside this
// excerpt.)
577 args: &[OpTy<'tcx, Tag>],
578 dest: &PlaceTy<'tcx, Tag>,
579 atomic: AtomicReadOp,
580 ) -> InterpResult<'tcx> {
581 let this = self.eval_context_mut();
583 let &[ref place] = check_arg_count(args)?;
584 let place = this.deref_operand(place)?;
586 // make sure it fits into a scalar; otherwise it cannot be atomic
587 let val = this.read_scalar_atomic(&place, atomic)?;
589 // Check alignment requirements. Atomics must always be aligned to their size,
590 // even if the type they wrap would be less aligned (e.g. AtomicU64 on 32bit must
// Required alignment == the value's size (a power of two for atomics, so
// `from_bytes` cannot fail).
592 let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
593 this.memory.check_ptr_access_align(
597 CheckInAllocMsg::MemoryAccessTest,
599 // Perform regular access.
600 this.write_scalar(val, dest)?;
// `atomic_store*` helper: atomically write the second argument to `*place`
// with ordering `atomic`. No return value, hence no `dest` parameter.
606 args: &[OpTy<'tcx, Tag>],
607 atomic: AtomicWriteOp,
608 ) -> InterpResult<'tcx> {
609 let this = self.eval_context_mut();
611 let &[ref place, ref val] = check_arg_count(args)?;
612 let place = this.deref_operand(place)?;
613 let val = this.read_scalar(val)?; // make sure it fits into a scalar; otherwise it cannot be atomic
615 // Check alignment requirements. Atomics must always be aligned to their size,
616 // even if the type they wrap would be less aligned (e.g. AtomicU64 on 32bit must
// Same size-based alignment check as in `atomic_load`.
618 let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
619 this.memory.check_ptr_access_align(
623 CheckInAllocMsg::MemoryAccessTest,
626 // Perform atomic store
627 this.write_scalar_atomic(val, &place, atomic)?;
// `atomic_singlethreadfence*` helper: validates the (empty) argument list and
// otherwise does nothing — see the FIXME. The `atomic` parameter is accepted
// but currently unused.
633 args: &[OpTy<'tcx, Tag>],
634 atomic: AtomicFenceOp,
635 ) -> InterpResult<'tcx> {
636 let &[] = check_arg_count(args)?;
638 //FIXME: compiler fences are currently ignored
// `atomic_fence*` helper: validates the (empty) argument list, then records
// the fence with the concurrency machinery via `validate_atomic_fence`.
644 args: &[OpTy<'tcx, Tag>],
645 atomic: AtomicFenceOp,
646 ) -> InterpResult<'tcx> {
647 let this = self.eval_context_mut();
648 let &[] = check_arg_count(args)?;
649 this.validate_atomic_fence(atomic)?;
// Fetch-and-modify helper backing the `atomic_{or,xor,and,nand,xadd,xsub,
// min,max,umin,umax}*` intrinsics: applies `atomic_op` to `*place` with the
// given ordering and returns the OLD value in `dest`.
655 args: &[OpTy<'tcx, Tag>],
656 dest: &PlaceTy<'tcx, Tag>,
659 ) -> InterpResult<'tcx> {
660 let this = self.eval_context_mut();
662 let &[ref place, ref rhs] = check_arg_count(args)?;
663 let place = this.deref_operand(place)?;
// These intrinsics are only ever instantiated with integer types; anything
// else is a bug in the caller, not UB in the interpreted program.
665 if !place.layout.ty.is_integral() {
666 bug!("Atomic arithmetic operations only work on integer types");
668 let rhs = this.read_immediate(rhs)?;
670 // Check alignment requirements. Atomics must always be aligned to their size,
671 // even if the type they wrap would be less aligned (e.g. AtomicU64 on 32bit must
673 let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
674 this.memory.check_ptr_access_align(
678 CheckInAllocMsg::MemoryAccessTest,
// Min case: third argument `true` selects min (see the `false` for max
// below). NOTE(review): `&dest` here vs plain `dest` at line 694 is an
// inconsistency; it compiles only via `&&PlaceTy -> &PlaceTy` deref coercion.
683 let old = this.atomic_min_max_scalar(&place, rhs, true, atomic)?;
684 this.write_immediate(*old, &dest)?; // old value is returned
// Max case: `false` selects max.
688 let old = this.atomic_min_max_scalar(&place, rhs, false, atomic)?;
689 this.write_immediate(*old, &dest)?; // old value is returned
// Generic MIR-binop case; `neg` bit-negates the result (nand).
692 AtomicOp::MirOp(op, neg) => {
693 let old = this.atomic_op_immediate(&place, &rhs, op, neg, atomic)?;
694 this.write_immediate(*old, dest)?; // old value is returned
// `atomic_xchg*` helper: atomically replace `*place` with `new`, returning
// the previous value in `dest`.
702 args: &[OpTy<'tcx, Tag>],
703 dest: &PlaceTy<'tcx, Tag>,
705 ) -> InterpResult<'tcx> {
706 let this = self.eval_context_mut();
708 let &[ref place, ref new] = check_arg_count(args)?;
709 let place = this.deref_operand(place)?;
710 let new = this.read_scalar(new)?;
712 // Check alignment requirements. Atomics must always be aligned to their size,
713 // even if the type they wrap would be less aligned (e.g. AtomicU64 on 32bit must
715 let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
716 this.memory.check_ptr_access_align(
720 CheckInAllocMsg::MemoryAccessTest,
// Swap and hand the old value back to the caller.
723 let old = this.atomic_exchange_scalar(&place, new, atomic)?;
724 this.write_scalar(old, dest)?; // old value is returned
// Shared implementation for strong and weak compare-exchange.
// `can_fail_spuriously` distinguishes `atomic_cxchgweak*` (true) from
// `atomic_cxchg*` (false); success/fail orderings arrive via parameters on
// elided lines (732-733). The result written to `dest` is the (old value,
// success flag) pair that `compare_exchange` returns to Rust code.
728 fn atomic_compare_exchange_impl(
730 args: &[OpTy<'tcx, Tag>],
731 dest: &PlaceTy<'tcx, Tag>,
734 can_fail_spuriously: bool,
735 ) -> InterpResult<'tcx> {
736 let this = self.eval_context_mut();
738 let &[ref place, ref expect_old, ref new] = check_arg_count(args)?;
739 let place = this.deref_operand(place)?;
740 let expect_old = this.read_immediate(expect_old)?; // read as immediate for the sake of `binary_op()`
741 let new = this.read_scalar(new)?;
743 // Check alignment requirements. Atomics must always be aligned to their size,
744 // even if the type they wrap would be less aligned (e.g. AtomicU64 on 32bit must
746 let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
747 this.memory.check_ptr_access_align(
751 CheckInAllocMsg::MemoryAccessTest,
// Delegate the actual CAS to the data-race-aware helper (arguments on
// elided lines 755-763), then return its (value, flag) result.
754 let old = this.atomic_compare_exchange_scalar(
764 this.write_immediate(old, dest)?;
// Strong compare-exchange: thin wrapper that forwards to the shared impl with
// `can_fail_spuriously = false`. (`success`/`fail` ordering parameters are on
// elided lines 772-773.)
768 fn atomic_compare_exchange(
770 args: &[OpTy<'tcx, Tag>],
771 dest: &PlaceTy<'tcx, Tag>,
774 ) -> InterpResult<'tcx> {
775 self.atomic_compare_exchange_impl(args, dest, success, fail, false)
// Weak compare-exchange: same wrapper, but `can_fail_spuriously = true`, so
// the CAS may fail even when the value matched.
778 fn atomic_compare_exchange_weak(
780 args: &[OpTy<'tcx, Tag>],
781 dest: &PlaceTy<'tcx, Tag>,
784 ) -> InterpResult<'tcx> {
785 self.atomic_compare_exchange_impl(args, dest, success, fail, true)
// Checked version of the float-to-int cast backing `float_to_int_unchecked`:
// truncates `f` (the soft-float parameter, on an elided line) toward zero and
// converts it to `dest_ty`, raising UB if the value is not exactly
// representable in that integer type. Generic over `F` so it serves both f32
// and f64.
788 fn float_to_int_unchecked<F>(
791 dest_ty: ty::Ty<'tcx>,
792 ) -> InterpResult<'tcx, Scalar<Tag>>
794 F: Float + Into<Scalar<Tag>>,
796 let this = self.eval_context_ref();
798 // Step 1: cut off the fractional part of `f`. The result of this is
799 // guaranteed to be precisely representable in IEEE floats.
800 let f = f.round_to_integral(Round::TowardZero).value;
802 // Step 2: Cast the truncated float to the target integer type and see if we lose any information in this step.
803 Ok(match dest_ty.kind() {
// Unsigned destination: convert via `to_u128` sized to the target width.
806 let size = Integer::from_uint_ty(this, *t).size();
807 let res = f.to_u128(size.bits_usize());
808 if res.status.is_empty() {
809 // No status flags means there was no further rounding or other loss of precision.
810 Scalar::from_uint(res.value, size)
812 // `f` was not representable in this integer type.
814 "`float_to_int_unchecked` intrinsic called on {} which cannot be represented in target type `{:?}`",
// Signed destination: same scheme via `to_i128`.
822 let size = Integer::from_int_ty(this, *t).size();
823 let res = f.to_i128(size.bits_usize());
824 if res.status.is_empty() {
825 // No status flags means there was no further rounding or other loss of precision.
826 Scalar::from_int(res.value, size)
828 // `f` was not representable in this integer type.
830 "`float_to_int_unchecked` intrinsic called on {} which cannot be represented in target type `{:?}`",
// Non-integer destination types are a compiler bug, not program UB.
837 _ => bug!("`float_to_int_unchecked` called with non-int output type {:?}", dest_ty),