5 use rustc_apfloat::{Float, Round};
6 use rustc_middle::ty::layout::{IntegerExt, LayoutOf};
7 use rustc_middle::{mir, mir::BinOp, ty, ty::FloatTy};
8 use rustc_target::abi::{Align, Integer};
11 use helpers::check_arg_count;
// Variant of the atomic-operation selector enum (the enum's declaration is
// elided from this chunk). Carries a MIR binary op plus a bool flag; the flag
// is `true` only for the `atomic_nand*` dispatches below, so it presumably
// negates the op's result — TODO confirm against `atomic_op_immediate`.
14 MirOp(mir::BinOp, bool),
// Blanket impl wiring these intrinsic shims into the Miri evaluation context.
19 impl<'mir, 'tcx: 'mir> EvalContextExt<'mir, 'tcx> for crate::MiriEvalContext<'mir, 'tcx> {}
20 pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx> {
// Entry point: emulate one intrinsic call. `instance` identifies the
// intrinsic, `args` are its operands, `ret` is the return place plus the
// basic block to continue in (None for diverging intrinsics).
// NOTE(review): this chunk is an elided excerpt — lines of the original file
// are missing between the numbered lines below, including this function's
// signature line and many closing braces.
23 instance: ty::Instance<'tcx>,
24 args: &[OpTy<'tcx, Tag>],
25 ret: Option<(&PlaceTy<'tcx, Tag>, mir::BasicBlock)>,
26 _unwind: StackPopUnwind,
27 ) -> InterpResult<'tcx> {
28 let this = self.eval_context_mut();
// First try the intrinsics that are shared with compile-time evaluation
// (implemented in rustc itself); if that handled it, we are done.
30 if this.emulate_intrinsic(instance, args, ret)? {
34 // All supported intrinsics have a return place.
35 let intrinsic_name = this.tcx.item_name(instance.def_id());
36 let intrinsic_name = intrinsic_name.as_str();
37 let (dest, ret) = match ret {
38 None => throw_unsup_format!("unimplemented (diverging) intrinsic: {}", intrinsic_name),
42 // Then handle terminating intrinsics.
43 match intrinsic_name {
44 // Miri overwriting CTFE intrinsics.
45 "ptr_guaranteed_eq" => {
46 let &[ref left, ref right] = check_arg_count(args)?;
47 let left = this.read_immediate(left)?;
48 let right = this.read_immediate(right)?;
49 this.binop_ignore_overflow(mir::BinOp::Eq, &left, &right, dest)?;
51 "ptr_guaranteed_ne" => {
52 let &[ref left, ref right] = check_arg_count(args)?;
53 let left = this.read_immediate(left)?;
54 let right = this.read_immediate(right)?;
55 this.binop_ignore_overflow(mir::BinOp::Ne, &left, &right, dest)?;
58 // Raw memory accesses
// Volatile load: modeled as a plain copy from the dereferenced place.
60 let &[ref place] = check_arg_count(args)?;
61 let place = this.deref_operand(place)?;
62 this.copy_op(&place.into(), dest)?;
// Volatile store: plain copy in the other direction.
65 let &[ref place, ref dest] = check_arg_count(args)?;
66 let place = this.deref_operand(place)?;
67 this.copy_op(dest, &place.into())?;
70 "write_bytes" | "volatile_set_memory" => {
71 let &[ref ptr, ref val_byte, ref count] = check_arg_count(args)?;
72 let ty = instance.substs.type_at(0);
73 let ty_layout = this.layout_of(ty)?;
74 let val_byte = this.read_scalar(val_byte)?.to_u8()?;
75 let ptr = this.read_pointer(ptr)?;
76 let count = this.read_scalar(count)?.to_machine_usize(this)?;
// `size * count` may overflow; that is UB for this intrinsic.
77 let byte_count = ty_layout.size.checked_mul(count, this).ok_or_else(|| {
78 err_ub_format!("overflow computing total size of `{}`", intrinsic_name)
81 .write_bytes(ptr, iter::repeat(val_byte).take(byte_count.bytes() as usize))?;
84 // Floating-point operations
// f32 unary math intrinsics: decode the bits, apply the host operation,
// re-encode. Bit-level results thus depend on the host libm (see FIXME).
100 let &[ref f] = check_arg_count(args)?;
101 // FIXME: Using host floats.
102 let f = f32::from_bits(this.read_scalar(f)?.to_u32()?);
103 let f = match intrinsic_name {
105 "fabsf32" => f.abs(),
107 "sqrtf32" => f.sqrt(),
109 "exp2f32" => f.exp2(),
111 "log10f32" => f.log10(),
112 "log2f32" => f.log2(),
113 "floorf32" => f.floor(),
114 "ceilf32" => f.ceil(),
115 "truncf32" => f.trunc(),
116 "roundf32" => f.round(),
119 this.write_scalar(Scalar::from_u32(f.to_bits()), dest)?;
// f64 unary math intrinsics: same pattern as the f32 arm above.
137 let &[ref f] = check_arg_count(args)?;
138 // FIXME: Using host floats.
139 let f = f64::from_bits(this.read_scalar(f)?.to_u64()?);
140 let f = match intrinsic_name {
142 "fabsf64" => f.abs(),
144 "sqrtf64" => f.sqrt(),
146 "exp2f64" => f.exp2(),
148 "log10f64" => f.log10(),
149 "log2f64" => f.log2(),
150 "floorf64" => f.floor(),
151 "ceilf64" => f.ceil(),
152 "truncf64" => f.trunc(),
153 "roundf64" => f.round(),
156 this.write_scalar(Scalar::from_u64(f.to_bits()), dest)?;
// `f*_fast` intrinsics: like the MIR binop, but it is UB if either
// operand is non-finite, so both are checked first.
166 let &[ref a, ref b] = check_arg_count(args)?;
167 let a = this.read_immediate(a)?;
168 let b = this.read_immediate(b)?;
169 let op = match intrinsic_name {
170 "fadd_fast" => mir::BinOp::Add,
171 "fsub_fast" => mir::BinOp::Sub,
172 "fmul_fast" => mir::BinOp::Mul,
173 "fdiv_fast" => mir::BinOp::Div,
174 "frem_fast" => mir::BinOp::Rem,
177 let float_finite = |x: ImmTy<'tcx, _>| -> InterpResult<'tcx, bool> {
178 Ok(match x.layout.ty.kind() {
179 ty::Float(FloatTy::F32) => x.to_scalar()?.to_f32()?.is_finite(),
180 ty::Float(FloatTy::F64) => x.to_scalar()?.to_f64()?.is_finite(),
182 "`{}` called with non-float input type {:?}",
188 match (float_finite(a)?, float_finite(b)?) {
189 (false, false) => throw_ub_format!(
190 "`{}` intrinsic called with non-finite value as both parameters",
193 (false, _) => throw_ub_format!(
194 "`{}` intrinsic called with non-finite value as first parameter",
197 (_, false) => throw_ub_format!(
198 "`{}` intrinsic called with non-finite value as second parameter",
203 this.binop_ignore_overflow(op, &a, &b, dest)?;
// f32 min/max/copysign: these use the soft-float (`rustc_apfloat`)
// implementation, not host floats.
211 let &[ref a, ref b] = check_arg_count(args)?;
212 let a = this.read_scalar(a)?.to_f32()?;
213 let b = this.read_scalar(b)?.to_f32()?;
214 let res = match intrinsic_name {
215 "minnumf32" => a.min(b),
216 "maxnumf32" => a.max(b),
217 "copysignf32" => a.copy_sign(b),
220 this.write_scalar(Scalar::from_f32(res), dest)?;
// f64 min/max/copysign: same soft-float pattern as above.
228 let &[ref a, ref b] = check_arg_count(args)?;
229 let a = this.read_scalar(a)?.to_f64()?;
230 let b = this.read_scalar(b)?.to_f64()?;
231 let res = match intrinsic_name {
232 "minnumf64" => a.min(b),
233 "maxnumf64" => a.max(b),
234 "copysignf64" => a.copy_sign(b),
237 this.write_scalar(Scalar::from_f64(res), dest)?;
// powf32: host-float exponentiation.
241 let &[ref f, ref f2] = check_arg_count(args)?;
242 // FIXME: Using host floats.
243 let f = f32::from_bits(this.read_scalar(f)?.to_u32()?);
244 let f2 = f32::from_bits(this.read_scalar(f2)?.to_u32()?);
245 this.write_scalar(Scalar::from_u32(f.powf(f2).to_bits()), dest)?;
// powf64: host-float exponentiation.
249 let &[ref f, ref f2] = check_arg_count(args)?;
250 // FIXME: Using host floats.
251 let f = f64::from_bits(this.read_scalar(f)?.to_u64()?);
252 let f2 = f64::from_bits(this.read_scalar(f2)?.to_u64()?);
253 this.write_scalar(Scalar::from_u64(f.powf(f2).to_bits()), dest)?;
// fmaf32: fused multiply-add via the soft-float implementation.
257 let &[ref a, ref b, ref c] = check_arg_count(args)?;
258 let a = this.read_scalar(a)?.to_f32()?;
259 let b = this.read_scalar(b)?.to_f32()?;
260 let c = this.read_scalar(c)?.to_f32()?;
261 let res = a.mul_add(b, c).value;
262 this.write_scalar(Scalar::from_f32(res), dest)?;
// fmaf64: fused multiply-add via the soft-float implementation.
266 let &[ref a, ref b, ref c] = check_arg_count(args)?;
267 let a = this.read_scalar(a)?.to_f64()?;
268 let b = this.read_scalar(b)?.to_f64()?;
269 let c = this.read_scalar(c)?.to_f64()?;
270 let res = a.mul_add(b, c).value;
271 this.write_scalar(Scalar::from_f64(res), dest)?;
// powif32: host-float integer power.
275 let &[ref f, ref i] = check_arg_count(args)?;
276 // FIXME: Using host floats.
277 let f = f32::from_bits(this.read_scalar(f)?.to_u32()?);
278 let i = this.read_scalar(i)?.to_i32()?;
279 this.write_scalar(Scalar::from_u32(f.powi(i).to_bits()), dest)?;
// powif64: host-float integer power.
283 let &[ref f, ref i] = check_arg_count(args)?;
284 // FIXME: Using host floats.
285 let f = f64::from_bits(this.read_scalar(f)?.to_u64()?);
286 let i = this.read_scalar(i)?.to_i32()?;
287 this.write_scalar(Scalar::from_u64(f.powi(i).to_bits()), dest)?;
290 "float_to_int_unchecked" => {
291 let &[ref val] = check_arg_count(args)?;
292 let val = this.read_immediate(val)?;
// Delegate to the helper further below, which reports UB on values
// that are not exactly representable in the target integer type.
294 let res = match val.layout.ty.kind() {
295 ty::Float(FloatTy::F32) =>
296 this.float_to_int_unchecked(val.to_scalar()?.to_f32()?, dest.layout.ty)?,
297 ty::Float(FloatTy::F64) =>
298 this.float_to_int_unchecked(val.to_scalar()?.to_f64()?, dest.layout.ty)?,
301 "`float_to_int_unchecked` called with non-float input type {:?}",
306 this.write_scalar(res, dest)?;
// SIMD element-wise binops: all three vectors must have the same length.
318 let &[ref left, ref right] = check_arg_count(args)?;
319 let (left, left_len) = this.operand_to_simd(left)?;
320 let (right, right_len) = this.operand_to_simd(right)?;
321 let (dest, dest_len) = this.place_to_simd(dest)?;
323 assert_eq!(dest_len, left_len);
324 assert_eq!(dest_len, right_len);
326 let op = match intrinsic_name {
327 "simd_add" => mir::BinOp::Add,
328 "simd_sub" => mir::BinOp::Sub,
329 "simd_mul" => mir::BinOp::Mul,
330 "simd_div" => mir::BinOp::Div,
331 "simd_rem" => mir::BinOp::Rem,
332 "simd_shl" => mir::BinOp::Shl,
333 "simd_shr" => mir::BinOp::Shr,
// Apply the op lane by lane.
337 for i in 0..dest_len {
338 let left = this.read_immediate(&this.mplace_index(&left, i)?.into())?;
339 let right = this.read_immediate(&this.mplace_index(&right, i)?.into())?;
340 let dest = this.mplace_index(&dest, i)?;
341 let (val, overflowed, ty) = this.overflowing_binary_op(op, &left, &right)?;
342 assert_eq!(ty, dest.layout.ty);
343 if matches!(op, mir::BinOp::Shl | mir::BinOp::Shr) {
344 // Shifts have extra UB as SIMD operations that the MIR binop does not have.
345 // See <https://github.com/rust-lang/rust/issues/91237>.
347 let r_val = right.to_scalar()?.to_bits(right.layout.size)?;
348 throw_ub_format!("overflowing shift by {} in `{}` in SIMD lane {}", r_val, intrinsic_name, i);
351 this.write_scalar(val, &dest.into())?;
// Atomic operations: each intrinsic-name suffix encodes a memory ordering
// (no suffix = SeqCst, `_acq` = Acquire, `_rel` = Release, etc.).
356 "atomic_load" => this.atomic_load(args, dest, AtomicReadOp::SeqCst)?,
357 "atomic_load_relaxed" => this.atomic_load(args, dest, AtomicReadOp::Relaxed)?,
358 "atomic_load_acq" => this.atomic_load(args, dest, AtomicReadOp::Acquire)?,
360 "atomic_store" => this.atomic_store(args, AtomicWriteOp::SeqCst)?,
361 "atomic_store_relaxed" => this.atomic_store(args, AtomicWriteOp::Relaxed)?,
362 "atomic_store_rel" => this.atomic_store(args, AtomicWriteOp::Release)?,
364 "atomic_fence_acq" => this.atomic_fence(args, AtomicFenceOp::Acquire)?,
365 "atomic_fence_rel" => this.atomic_fence(args, AtomicFenceOp::Release)?,
366 "atomic_fence_acqrel" => this.atomic_fence(args, AtomicFenceOp::AcqRel)?,
367 "atomic_fence" => this.atomic_fence(args, AtomicFenceOp::SeqCst)?,
369 "atomic_singlethreadfence_acq" => this.compiler_fence(args, AtomicFenceOp::Acquire)?,
370 "atomic_singlethreadfence_rel" => this.compiler_fence(args, AtomicFenceOp::Release)?,
371 "atomic_singlethreadfence_acqrel" =>
372 this.compiler_fence(args, AtomicFenceOp::AcqRel)?,
373 "atomic_singlethreadfence" => this.compiler_fence(args, AtomicFenceOp::SeqCst)?,
375 "atomic_xchg" => this.atomic_exchange(args, dest, AtomicRwOp::SeqCst)?,
376 "atomic_xchg_acq" => this.atomic_exchange(args, dest, AtomicRwOp::Acquire)?,
377 "atomic_xchg_rel" => this.atomic_exchange(args, dest, AtomicRwOp::Release)?,
378 "atomic_xchg_acqrel" => this.atomic_exchange(args, dest, AtomicRwOp::AcqRel)?,
379 "atomic_xchg_relaxed" => this.atomic_exchange(args, dest, AtomicRwOp::Relaxed)?,
// Compare-exchange: each dispatch supplies a success and a failure
// ordering (failure is a read ordering and is never stronger here).
383 this.atomic_compare_exchange(args, dest, AtomicRwOp::SeqCst, AtomicReadOp::SeqCst)?,
385 "atomic_cxchg_acq" =>
386 this.atomic_compare_exchange(args, dest, AtomicRwOp::Acquire, AtomicReadOp::Acquire)?,
388 "atomic_cxchg_rel" =>
389 this.atomic_compare_exchange(args, dest, AtomicRwOp::Release, AtomicReadOp::Relaxed)?,
391 "atomic_cxchg_acqrel" =>
392 this.atomic_compare_exchange(args, dest, AtomicRwOp::AcqRel, AtomicReadOp::Acquire)?,
394 "atomic_cxchg_relaxed" =>
395 this.atomic_compare_exchange(args, dest, AtomicRwOp::Relaxed, AtomicReadOp::Relaxed)?,
397 "atomic_cxchg_acq_failrelaxed" =>
398 this.atomic_compare_exchange(args, dest, AtomicRwOp::Acquire, AtomicReadOp::Relaxed)?,
400 "atomic_cxchg_acqrel_failrelaxed" =>
401 this.atomic_compare_exchange(args, dest, AtomicRwOp::AcqRel, AtomicReadOp::Relaxed)?,
403 "atomic_cxchg_failrelaxed" =>
404 this.atomic_compare_exchange(args, dest, AtomicRwOp::SeqCst, AtomicReadOp::Relaxed)?,
406 "atomic_cxchg_failacq" =>
407 this.atomic_compare_exchange(args, dest, AtomicRwOp::SeqCst, AtomicReadOp::Acquire)?,
// Weak compare-exchange: same ordering table, but may fail spuriously.
410 "atomic_cxchgweak" =>
411 this.atomic_compare_exchange_weak(args, dest, AtomicRwOp::SeqCst, AtomicReadOp::SeqCst)?,
413 "atomic_cxchgweak_acq" =>
414 this.atomic_compare_exchange_weak(args, dest, AtomicRwOp::Acquire, AtomicReadOp::Acquire)?,
416 "atomic_cxchgweak_rel" =>
417 this.atomic_compare_exchange_weak(args, dest, AtomicRwOp::Release, AtomicReadOp::Relaxed)?,
419 "atomic_cxchgweak_acqrel" =>
420 this.atomic_compare_exchange_weak(args, dest, AtomicRwOp::AcqRel, AtomicReadOp::Acquire)?,
422 "atomic_cxchgweak_relaxed" =>
423 this.atomic_compare_exchange_weak(args, dest, AtomicRwOp::Relaxed, AtomicReadOp::Relaxed)?,
425 "atomic_cxchgweak_acq_failrelaxed" =>
426 this.atomic_compare_exchange_weak(args, dest, AtomicRwOp::Acquire, AtomicReadOp::Relaxed)?,
428 "atomic_cxchgweak_acqrel_failrelaxed" =>
429 this.atomic_compare_exchange_weak(args, dest, AtomicRwOp::AcqRel, AtomicReadOp::Relaxed)?,
431 "atomic_cxchgweak_failrelaxed" =>
432 this.atomic_compare_exchange_weak(args, dest, AtomicRwOp::SeqCst, AtomicReadOp::Relaxed)?,
434 "atomic_cxchgweak_failacq" =>
435 this.atomic_compare_exchange_weak(args, dest, AtomicRwOp::SeqCst, AtomicReadOp::Acquire)?,
// Read-modify-write ops: `MirOp(op, flag)` — the flag is `true` only for
// the NAND family below.
439 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitOr, false), AtomicRwOp::SeqCst)?,
442 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitOr, false), AtomicRwOp::Acquire)?,
445 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitOr, false), AtomicRwOp::Release)?,
447 "atomic_or_acqrel" =>
448 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitOr, false), AtomicRwOp::AcqRel)?,
450 "atomic_or_relaxed" =>
451 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitOr, false), AtomicRwOp::Relaxed)?,
454 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitXor, false), AtomicRwOp::SeqCst)?,
457 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitXor, false), AtomicRwOp::Acquire)?,
460 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitXor, false), AtomicRwOp::Release)?,
462 "atomic_xor_acqrel" =>
463 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitXor, false), AtomicRwOp::AcqRel)?,
465 "atomic_xor_relaxed" =>
466 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitXor, false), AtomicRwOp::Relaxed)?,
469 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitAnd, false), AtomicRwOp::SeqCst)?,
472 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitAnd, false), AtomicRwOp::Acquire)?,
475 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitAnd, false), AtomicRwOp::Release)?,
477 "atomic_and_acqrel" =>
478 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitAnd, false), AtomicRwOp::AcqRel)?,
480 "atomic_and_relaxed" =>
481 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitAnd, false), AtomicRwOp::Relaxed)?,
484 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitAnd, true), AtomicRwOp::SeqCst)?,
487 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitAnd, true), AtomicRwOp::Acquire)?,
490 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitAnd, true), AtomicRwOp::Release)?,
492 "atomic_nand_acqrel" =>
493 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitAnd, true), AtomicRwOp::AcqRel)?,
495 "atomic_nand_relaxed" =>
496 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::BitAnd, true), AtomicRwOp::Relaxed)?,
499 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::Add, false), AtomicRwOp::SeqCst)?,
502 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::Add, false), AtomicRwOp::Acquire)?,
505 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::Add, false), AtomicRwOp::Release)?,
507 "atomic_xadd_acqrel" =>
508 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::Add, false), AtomicRwOp::AcqRel)?,
510 "atomic_xadd_relaxed" =>
511 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::Add, false), AtomicRwOp::Relaxed)?,
514 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::Sub, false), AtomicRwOp::SeqCst)?,
517 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::Sub, false), AtomicRwOp::Acquire)?,
520 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::Sub, false), AtomicRwOp::Release)?,
522 "atomic_xsub_acqrel" =>
523 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::Sub, false), AtomicRwOp::AcqRel)?,
525 "atomic_xsub_relaxed" =>
526 this.atomic_op(args, dest, AtomicOp::MirOp(BinOp::Sub, false), AtomicRwOp::Relaxed)?,
// min/max and umin/umax share the same `AtomicOp::Min`/`Max` selectors;
// presumably signedness is resolved from the operand type inside the
// helper — TODO confirm against `atomic_min_max_scalar`.
527 "atomic_min" => this.atomic_op(args, dest, AtomicOp::Min, AtomicRwOp::SeqCst)?,
528 "atomic_min_acq" => this.atomic_op(args, dest, AtomicOp::Min, AtomicRwOp::Acquire)?,
529 "atomic_min_rel" => this.atomic_op(args, dest, AtomicOp::Min, AtomicRwOp::Release)?,
530 "atomic_min_acqrel" => this.atomic_op(args, dest, AtomicOp::Min, AtomicRwOp::AcqRel)?,
531 "atomic_min_relaxed" =>
532 this.atomic_op(args, dest, AtomicOp::Min, AtomicRwOp::Relaxed)?,
533 "atomic_max" => this.atomic_op(args, dest, AtomicOp::Max, AtomicRwOp::SeqCst)?,
534 "atomic_max_acq" => this.atomic_op(args, dest, AtomicOp::Max, AtomicRwOp::Acquire)?,
535 "atomic_max_rel" => this.atomic_op(args, dest, AtomicOp::Max, AtomicRwOp::Release)?,
536 "atomic_max_acqrel" => this.atomic_op(args, dest, AtomicOp::Max, AtomicRwOp::AcqRel)?,
537 "atomic_max_relaxed" =>
538 this.atomic_op(args, dest, AtomicOp::Max, AtomicRwOp::Relaxed)?,
539 "atomic_umin" => this.atomic_op(args, dest, AtomicOp::Min, AtomicRwOp::SeqCst)?,
540 "atomic_umin_acq" => this.atomic_op(args, dest, AtomicOp::Min, AtomicRwOp::Acquire)?,
541 "atomic_umin_rel" => this.atomic_op(args, dest, AtomicOp::Min, AtomicRwOp::Release)?,
542 "atomic_umin_acqrel" =>
543 this.atomic_op(args, dest, AtomicOp::Min, AtomicRwOp::AcqRel)?,
544 "atomic_umin_relaxed" =>
545 this.atomic_op(args, dest, AtomicOp::Min, AtomicRwOp::Relaxed)?,
546 "atomic_umax" => this.atomic_op(args, dest, AtomicOp::Max, AtomicRwOp::SeqCst)?,
547 "atomic_umax_acq" => this.atomic_op(args, dest, AtomicOp::Max, AtomicRwOp::Acquire)?,
548 "atomic_umax_rel" => this.atomic_op(args, dest, AtomicOp::Max, AtomicRwOp::Release)?,
549 "atomic_umax_acqrel" =>
550 this.atomic_op(args, dest, AtomicOp::Max, AtomicRwOp::AcqRel)?,
551 "atomic_umax_relaxed" =>
552 this.atomic_op(args, dest, AtomicOp::Max, AtomicRwOp::Relaxed)?,
// Other miscellaneous intrinsics.
556 let &[ref num, ref denom] = check_arg_count(args)?;
557 this.exact_div(&this.read_immediate(num)?, &this.read_immediate(denom)?, dest)?;
// `try` handles its own control flow, so it returns directly.
560 "try" => return this.handle_try(args, dest, ret),
563 let &[] = check_arg_count(args)?;
564 // normally this would raise a SIGTRAP, which aborts if no debugger is connected
565 throw_machine_stop!(TerminationInfo::Abort("Trace/breakpoint trap".to_string()))
568 name => throw_unsup_format!("unimplemented intrinsic: {}", name),
// The intrinsic wrote its result; log the destination and continue
// execution in the return block.
571 trace!("{:?}", this.dump_place(**dest));
572 this.go_to_block(ret);
// Shim body for the `atomic_load*` intrinsics (the `fn` signature line is
// elided from this chunk): dereference the single pointer argument, read it
// atomically with the given ordering, then store the value into `dest` with a
// plain write.
578 args: &[OpTy<'tcx, Tag>],
579 dest: &PlaceTy<'tcx, Tag>,
580 atomic: AtomicReadOp,
581 ) -> InterpResult<'tcx> {
582 let this = self.eval_context_mut();
584 let &[ref place] = check_arg_count(args)?;
585 let place = this.deref_operand(place)?;
587 // make sure it fits into a scalar; otherwise it cannot be atomic
588 let val = this.read_scalar_atomic(&place, atomic)?;
590 // Check alignment requirements. Atomics must always be aligned to their size,
591 // even if the type they wrap would be less aligned (e.g. AtomicU64 on 32bit must
// Alignment required is the value's *size*, not its type's ABI alignment.
593 let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
594 this.memory.check_ptr_access_align(
598 CheckInAllocMsg::MemoryAccessTest,
600 // Perform regular access.
601 this.write_scalar(val, dest)?;
// Shim body for the `atomic_store*` intrinsics (signature line elided): read
// the value with a plain read, check size-based alignment, then write it
// atomically with the given ordering.
607 args: &[OpTy<'tcx, Tag>],
608 atomic: AtomicWriteOp,
609 ) -> InterpResult<'tcx> {
610 let this = self.eval_context_mut();
612 let &[ref place, ref val] = check_arg_count(args)?;
613 let place = this.deref_operand(place)?;
614 let val = this.read_scalar(val)?; // make sure it fits into a scalar; otherwise it cannot be atomic
616 // Check alignment requirements. Atomics must always be aligned to their size,
617 // even if the type they wrap would be less aligned (e.g. AtomicU64 on 32bit must
619 let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
620 this.memory.check_ptr_access_align(
624 CheckInAllocMsg::MemoryAccessTest,
627 // Perform atomic store
628 this.write_scalar_atomic(val, &place, atomic)?;
// Shim body for the `atomic_singlethreadfence*` intrinsics (signature line
// elided): validates that there are no arguments; the fence itself is
// currently a no-op (see the FIXME below).
634 args: &[OpTy<'tcx, Tag>],
635 atomic: AtomicFenceOp,
636 ) -> InterpResult<'tcx> {
637 let &[] = check_arg_count(args)?;
639 //FIXME: compiler fences are currently ignored
// Shim body for the `atomic_fence*` intrinsics (signature line elided):
// validates that there are no arguments and records the fence with the data
// race detector via `validate_atomic_fence`.
645 args: &[OpTy<'tcx, Tag>],
646 atomic: AtomicFenceOp,
647 ) -> InterpResult<'tcx> {
648 let this = self.eval_context_mut();
649 let &[] = check_arg_count(args)?;
650 this.validate_atomic_fence(atomic)?;
// Shim body for the atomic read-modify-write intrinsics (signature line
// elided). Dispatches on the `AtomicOp` selector: Min/Max use the dedicated
// min/max helper (the bool distinguishes min from max), everything else goes
// through `atomic_op_immediate` with a MIR binop. In every case the *old*
// value is written to `dest`, matching the intrinsics' return convention.
656 args: &[OpTy<'tcx, Tag>],
657 dest: &PlaceTy<'tcx, Tag>,
660 ) -> InterpResult<'tcx> {
661 let this = self.eval_context_mut();
663 let &[ref place, ref rhs] = check_arg_count(args)?;
664 let place = this.deref_operand(place)?;
// Only integer types are supported; anything else is a bug in the caller.
666 if !place.layout.ty.is_integral() {
667 bug!("Atomic arithmetic operations only work on integer types");
669 let rhs = this.read_immediate(rhs)?;
671 // Check alignment requirements. Atomics must always be aligned to their size,
672 // even if the type they wrap would be less aligned (e.g. AtomicU64 on 32bit must
674 let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
675 this.memory.check_ptr_access_align(
679 CheckInAllocMsg::MemoryAccessTest,
// Min arm (`true` selects min in the shared min/max helper).
684 let old = this.atomic_min_max_scalar(&place, rhs, true, atomic)?;
685 this.write_immediate(*old, &dest)?; // old value is returned
// Max arm (`false` selects max).
689 let old = this.atomic_min_max_scalar(&place, rhs, false, atomic)?;
690 this.write_immediate(*old, &dest)?; // old value is returned
693 AtomicOp::MirOp(op, neg) => {
694 let old = this.atomic_op_immediate(&place, &rhs, op, neg, atomic)?;
695 this.write_immediate(*old, dest)?; // old value is returned
// Shim body for the `atomic_xchg*` intrinsics (signature line elided):
// swap in the new value atomically and write the old value to `dest`.
703 args: &[OpTy<'tcx, Tag>],
704 dest: &PlaceTy<'tcx, Tag>,
706 ) -> InterpResult<'tcx> {
707 let this = self.eval_context_mut();
709 let &[ref place, ref new] = check_arg_count(args)?;
710 let place = this.deref_operand(place)?;
711 let new = this.read_scalar(new)?;
713 // Check alignment requirements. Atomics must always be aligned to their size,
714 // even if the type they wrap would be less aligned (e.g. AtomicU64 on 32bit must
716 let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
717 this.memory.check_ptr_access_align(
721 CheckInAllocMsg::MemoryAccessTest,
724 let old = this.atomic_exchange_scalar(&place, new, atomic)?;
725 this.write_scalar(old, dest)?; // old value is returned
// Shared implementation behind both the strong and weak compare-exchange
// shims. `can_fail_spuriously` distinguishes the weak variant. The result
// written to `dest` is whatever `atomic_compare_exchange_scalar` produces
// (old value plus success flag, per the intrinsic's (T, bool) return —
// TODO confirm against that helper, which is not visible in this chunk).
729 fn atomic_compare_exchange_impl(
731 args: &[OpTy<'tcx, Tag>],
732 dest: &PlaceTy<'tcx, Tag>,
735 can_fail_spuriously: bool,
736 ) -> InterpResult<'tcx> {
737 let this = self.eval_context_mut();
739 let &[ref place, ref expect_old, ref new] = check_arg_count(args)?;
740 let place = this.deref_operand(place)?;
741 let expect_old = this.read_immediate(expect_old)?; // read as immediate for the sake of `binary_op()`
742 let new = this.read_scalar(new)?;
744 // Check alignment requirements. Atomics must always be aligned to their size,
745 // even if the type they wrap would be less aligned (e.g. AtomicU64 on 32bit must
747 let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
748 this.memory.check_ptr_access_align(
752 CheckInAllocMsg::MemoryAccessTest,
755 let old = this.atomic_compare_exchange_scalar(
765 this.write_immediate(old, dest)?;
// Strong compare-exchange: delegates to the shared impl with
// `can_fail_spuriously = false`.
769 fn atomic_compare_exchange(
771 args: &[OpTy<'tcx, Tag>],
772 dest: &PlaceTy<'tcx, Tag>,
775 ) -> InterpResult<'tcx> {
776 self.atomic_compare_exchange_impl(args, dest, success, fail, false)
// Weak compare-exchange: delegates to the shared impl with
// `can_fail_spuriously = true`.
779 fn atomic_compare_exchange_weak(
781 args: &[OpTy<'tcx, Tag>],
782 dest: &PlaceTy<'tcx, Tag>,
785 ) -> InterpResult<'tcx> {
786 self.atomic_compare_exchange_impl(args, dest, success, fail, true)
// Helper for the `float_to_int_unchecked` intrinsic: converts a soft-float
// value `f` to the integer type `dest_ty`, reporting UB if the value is not
// exactly representable there. Works for any `F: rustc_apfloat::Float`.
789 fn float_to_int_unchecked<F>(
792 dest_ty: ty::Ty<'tcx>,
793 ) -> InterpResult<'tcx, Scalar<Tag>>
795 F: Float + Into<Scalar<Tag>>,
797 let this = self.eval_context_ref();
799 // Step 1: cut off the fractional part of `f`. The result of this is
800 // guaranteed to be precisely representable in IEEE floats.
801 let f = f.round_to_integral(Round::TowardZero).value;
803 // Step 2: Cast the truncated float to the target integer type and see if we lose any information in this step.
804 Ok(match dest_ty.kind() {
// Unsigned target: use `to_u128` clamped to the target's bit width.
807 let size = Integer::from_uint_ty(this, *t).size();
808 let res = f.to_u128(size.bits_usize());
809 if res.status.is_empty() {
810 // No status flags means there was no further rounding or other loss of precision.
811 Scalar::from_uint(res.value, size)
813 // `f` was not representable in this integer type.
815 "`float_to_int_unchecked` intrinsic called on {} which cannot be represented in target type `{:?}`",
// Signed target: same scheme via `to_i128`.
823 let size = Integer::from_int_ty(this, *t).size();
824 let res = f.to_i128(size.bits_usize());
825 if res.status.is_empty() {
826 // No status flags means there was no further rounding or other loss of precision.
827 Scalar::from_int(res.value, size)
829 // `f` was not representable in this integer type.
831 "`float_to_int_unchecked` intrinsic called on {} which cannot be represented in target type `{:?}`",
// Non-integer destination types are a caller bug.
838 _ => bug!("`float_to_int_unchecked` called with non-int output type {:?}", dest_ty),