5 use rustc_attr as attr;
6 use rustc_ast::ast::FloatTy;
7 use rustc_middle::{mir, mir::BinOp, ty};
8 use rustc_middle::ty::layout::IntegerExt;
9 use rustc_apfloat::{Float, Round};
10 use rustc_target::abi::{Align, Integer, LayoutOf};
13 use helpers::check_arg_count;
// NOTE(review): this file is a partial extract (many original lines are
// missing and residual line-number prefixes are embedded in the text), so
// comments below only describe what the visible code demonstrates.
// Marker impl: attaches the `EvalContextExt` helper trait to Miri's
// evaluation context so its methods are callable on `MiriEvalContext`.
15 impl<'mir, 'tcx: 'mir> EvalContextExt<'mir, 'tcx> for crate::MiriEvalContext<'mir, 'tcx> {}
// Extension trait holding the intrinsic shims; requires access to the Miri
// eval context via `MiriEvalContextExt`.
16 pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx> {
// `call_intrinsic` (the `fn` line itself is not visible in this extract):
// emulates one compiler intrinsic call for `instance` with operands `args`.
// `ret` carries the destination place plus the basic block to continue in;
// diverging intrinsics (ret == None) are rejected below.
19 instance: ty::Instance<'tcx>,
20 args: &[OpTy<'tcx, Tag>],
21 ret: Option<(PlaceTy<'tcx, Tag>, mir::BasicBlock)>,
// Unwind target is currently unused by this shim.
22 _unwind: Option<mir::BasicBlock>,
23 ) -> InterpResult<'tcx> {
24 let this = self.eval_context_mut();
// First let the shared CTFE engine handle intrinsics it already knows.
26 if this.emulate_intrinsic(instance, args, ret)? {
30 // All supported intrinsics have a return place.
31 let intrinsic_name = &*this.tcx.item_name(instance.def_id()).as_str();
32 let (dest, ret) = match ret {
33 None => throw_unsup_format!("unimplemented (diverging) intrinsic: {}", intrinsic_name),
37 // Then handle terminating intrinsics.
38 match intrinsic_name {
39 // Miri overwriting CTFE intrinsics.
// At runtime pointer equality is fully decidable, so Miri answers
// `ptr_guaranteed_eq`/`ptr_guaranteed_ne` with a plain Eq/Ne comparison.
40 "ptr_guaranteed_eq" => {
41 let &[left, right] = check_arg_count(args)?;
42 let left = this.read_immediate(left)?;
43 let right = this.read_immediate(right)?;
44 this.binop_ignore_overflow(mir::BinOp::Eq, left, right, dest)?;
46 "ptr_guaranteed_ne" => {
47 let &[left, right] = check_arg_count(args)?;
48 let left = this.read_immediate(left)?;
49 let right = this.read_immediate(right)?;
50 this.binop_ignore_overflow(mir::BinOp::Ne, left, right, dest)?;
53 // Raw memory accesses
// `copy`/`copy_nonoverlapping`: validate both pointers for a region of
// `count * size_of::<T>()` bytes (overflow-checked), then memcpy.
56 | "copy_nonoverlapping"
58 let &[src, dest, count] = check_arg_count(args)?;
59 let elem_ty = instance.substs.type_at(0);
60 let elem_layout = this.layout_of(elem_ty)?;
61 let count = this.read_scalar(count)?.to_machine_usize(this)?;
62 let elem_align = elem_layout.align.abi;
// Total byte count may overflow `usize`; that is UB, reported here.
64 let size = elem_layout.size.checked_mul(count, this)
65 .ok_or_else(|| err_ub_format!("overflow computing total size of `{}`", intrinsic_name))?;
66 let src = this.read_scalar(src)?.check_init()?;
67 let src = this.memory.check_ptr_access(src, size, elem_align)?;
68 let dest = this.read_scalar(dest)?.check_init()?;
69 let dest = this.memory.check_ptr_access(dest, size, elem_align)?;
// `check_ptr_access` returns None for zero-sized accesses; only copy
// when both sides resolved to real pointers.
71 if let (Some(src), Some(dest)) = (src, dest) {
// The `_nonoverlapping` variant asserts the ranges do not overlap.
76 intrinsic_name.ends_with("_nonoverlapping"),
// Three copy-style arms; the intrinsic names are not visible in this
// extract (presumably the volatile load/store family — TODO confirm
// against the full source). Each dereferences a pointer operand and uses
// `copy_op` to move the value to/from the return place.
82 let &[place, dest] = check_arg_count(args)?;
83 let place = this.deref_operand(place)?;
84 this.copy_op(dest, place.into())?;
// Single-operand variant: copies the intrinsic's return place into the
// dereferenced pointer (store direction).
88 let &[place] = check_arg_count(args)?;
89 let place = this.deref_operand(place)?;
90 this.copy_op(place.into(), dest)?;
93 let &[place, dest] = check_arg_count(args)?;
94 let place = this.deref_operand(place)?;
95 this.copy_op(dest, place.into())?;
// `write_bytes`: fill `count * size_of::<T>()` bytes at `ptr` with
// `val_byte`. Total size is overflow-checked (overflow is UB).
99 let &[ptr, val_byte, count] = check_arg_count(args)?;
100 let ty = instance.substs.type_at(0);
101 let ty_layout = this.layout_of(ty)?;
102 let val_byte = this.read_scalar(val_byte)?.to_u8()?;
103 let ptr = this.read_scalar(ptr)?.check_init()?;
104 let count = this.read_scalar(count)?.to_machine_usize(this)?;
105 let byte_count = ty_layout.size.checked_mul(count, this)
106 .ok_or_else(|| err_ub_format!("overflow computing total size of `write_bytes`"))?;
// Write the repeated byte directly into memory.
108 .write_bytes(ptr, iter::repeat(val_byte).take(byte_count.bytes() as usize))?;
111 // Floating-point operations
// Unary f32 math intrinsics, implemented by transmuting the bits to a
// host `f32` and using host math (hence the FIXME: results can differ
// from the target's floating-point behavior).
127 let &[f] = check_arg_count(args)?;
128 // FIXME: Using host floats.
129 let f = f32::from_bits(this.read_scalar(f)?.to_u32()?);
130 let f = match intrinsic_name {
132 "fabsf32" => f.abs(),
134 "sqrtf32" => f.sqrt(),
136 "exp2f32" => f.exp2(),
138 "log10f32" => f.log10(),
139 "log2f32" => f.log2(),
140 "floorf32" => f.floor(),
141 "ceilf32" => f.ceil(),
142 "truncf32" => f.trunc(),
143 "roundf32" => f.round(),
// Result is written back as raw bits.
146 this.write_scalar(Scalar::from_u32(f.to_bits()), dest)?;
// Same scheme for the f64 variants.
164 let &[f] = check_arg_count(args)?;
165 // FIXME: Using host floats.
166 let f = f64::from_bits(this.read_scalar(f)?.to_u64()?);
167 let f = match intrinsic_name {
169 "fabsf64" => f.abs(),
171 "sqrtf64" => f.sqrt(),
173 "exp2f64" => f.exp2(),
175 "log10f64" => f.log10(),
176 "log2f64" => f.log2(),
177 "floorf64" => f.floor(),
178 "ceilf64" => f.ceil(),
179 "truncf64" => f.trunc(),
180 "roundf64" => f.round(),
183 this.write_scalar(Scalar::from_u64(f.to_bits()), dest)?;
// "Fast-math" binary float ops: mapped onto the ordinary MIR BinOps
// (overflow is irrelevant for floats, hence `binop_ignore_overflow`).
193 let &[a, b] = check_arg_count(args)?;
194 let a = this.read_immediate(a)?;
195 let b = this.read_immediate(b)?;
196 let op = match intrinsic_name {
197 "fadd_fast" => mir::BinOp::Add,
198 "fsub_fast" => mir::BinOp::Sub,
199 "fmul_fast" => mir::BinOp::Mul,
200 "fdiv_fast" => mir::BinOp::Div,
201 "frem_fast" => mir::BinOp::Rem,
204 this.binop_ignore_overflow(op, a, b, dest)?;
// min/max/copysign for f32, computed via rustc_apfloat (soft-float, so
// no host-float caveat here).
212 let &[a, b] = check_arg_count(args)?;
213 let a = this.read_scalar(a)?.to_f32()?;
214 let b = this.read_scalar(b)?.to_f32()?;
215 let res = match intrinsic_name {
216 "minnumf32" => a.min(b),
217 "maxnumf32" => a.max(b),
218 "copysignf32" => a.copy_sign(b),
221 this.write_scalar(Scalar::from_f32(res), dest)?;
// ... and the f64 counterparts.
229 let &[a, b] = check_arg_count(args)?;
230 let a = this.read_scalar(a)?.to_f64()?;
231 let b = this.read_scalar(b)?.to_f64()?;
232 let res = match intrinsic_name {
233 "minnumf64" => a.min(b),
234 "maxnumf64" => a.max(b),
235 "copysignf64" => a.copy_sign(b),
238 this.write_scalar(Scalar::from_f64(res), dest)?;
// powf32/powf64: host-float exponentiation (see FIXME).
242 let &[f, f2] = check_arg_count(args)?;
243 // FIXME: Using host floats.
244 let f = f32::from_bits(this.read_scalar(f)?.to_u32()?);
245 let f2 = f32::from_bits(this.read_scalar(f2)?.to_u32()?);
246 this.write_scalar(Scalar::from_u32(f.powf(f2).to_bits()), dest)?;
250 let &[f, f2] = check_arg_count(args)?;
251 // FIXME: Using host floats.
252 let f = f64::from_bits(this.read_scalar(f)?.to_u64()?);
253 let f2 = f64::from_bits(this.read_scalar(f2)?.to_u64()?);
254 this.write_scalar(Scalar::from_u64(f.powf(f2).to_bits()), dest)?;
// fused multiply-add via rustc_apfloat (`mul_add`), f32 then f64.
258 let &[a, b, c] = check_arg_count(args)?;
259 let a = this.read_scalar(a)?.to_f32()?;
260 let b = this.read_scalar(b)?.to_f32()?;
261 let c = this.read_scalar(c)?.to_f32()?;
262 let res = a.mul_add(b, c).value;
263 this.write_scalar(Scalar::from_f32(res), dest)?;
267 let &[a, b, c] = check_arg_count(args)?;
268 let a = this.read_scalar(a)?.to_f64()?;
269 let b = this.read_scalar(b)?.to_f64()?;
270 let c = this.read_scalar(c)?.to_f64()?;
271 let res = a.mul_add(b, c).value;
272 this.write_scalar(Scalar::from_f64(res), dest)?;
// powi: float raised to an i32 power, again using host floats.
276 let &[f, i] = check_arg_count(args)?;
277 // FIXME: Using host floats.
278 let f = f32::from_bits(this.read_scalar(f)?.to_u32()?);
279 let i = this.read_scalar(i)?.to_i32()?;
280 this.write_scalar(Scalar::from_u32(f.powi(i).to_bits()), dest)?;
284 let &[f, i] = check_arg_count(args)?;
285 // FIXME: Using host floats.
286 let f = f64::from_bits(this.read_scalar(f)?.to_u64()?);
287 let i = this.read_scalar(i)?.to_i32()?;
288 this.write_scalar(Scalar::from_u64(f.powi(i).to_bits()), dest)?;
// Dispatch on the *input* float width; the helper below does the
// range-checked conversion and reports UB on unrepresentable values.
291 "float_to_int_unchecked" => {
292 let &[val] = check_arg_count(args)?;
293 let val = this.read_immediate(val)?;
295 let res = match val.layout.ty.kind() {
296 ty::Float(FloatTy::F32) => {
297 this.float_to_int_unchecked(val.to_scalar()?.to_f32()?, dest.layout.ty)?
299 ty::Float(FloatTy::F64) => {
300 this.float_to_int_unchecked(val.to_scalar()?.to_f64()?, dest.layout.ty)?
// Type system guarantees a float input; anything else is a compiler bug.
302 _ => bug!("`float_to_int_unchecked` called with non-float input type {:?}", val.layout.ty),
305 this.write_scalar(res, dest)?;
// Atomic intrinsics: each name suffix encodes the memory ordering
// (none = SeqCst, `_acq` = Acquire, `_rel` = Release, `_acqrel` = AcqRel,
// `_relaxed` = Relaxed); the arms just forward to the typed helpers.
309 "atomic_load" => this.atomic_load(args, dest, AtomicReadOp::SeqCst)?,
310 "atomic_load_relaxed" => this.atomic_load(args, dest, AtomicReadOp::Relaxed)?,
311 "atomic_load_acq" => this.atomic_load(args, dest, AtomicReadOp::Acquire)?,
313 "atomic_store" => this.atomic_store(args, AtomicWriteOp::SeqCst)?,
314 "atomic_store_relaxed" => this.atomic_store(args, AtomicWriteOp::Relaxed)?,
315 "atomic_store_rel" => this.atomic_store(args, AtomicWriteOp::Release)?,
317 "atomic_fence_acq" => this.atomic_fence(args, AtomicFenceOp::Acquire)?,
318 "atomic_fence_rel" => this.atomic_fence(args, AtomicFenceOp::Release)?,
319 "atomic_fence_acqrel" => this.atomic_fence(args, AtomicFenceOp::AcqRel)?,
320 "atomic_fence" => this.atomic_fence(args, AtomicFenceOp::SeqCst)?,
// Single-threaded fences only restrict the compiler, not other threads.
322 "atomic_singlethreadfence_acq" => this.compiler_fence(args, AtomicFenceOp::Acquire)?,
323 "atomic_singlethreadfence_rel" => this.compiler_fence(args, AtomicFenceOp::Release)?,
324 "atomic_singlethreadfence_acqrel" => this.compiler_fence(args, AtomicFenceOp::AcqRel)?,
325 "atomic_singlethreadfence" => this.compiler_fence(args, AtomicFenceOp::SeqCst)?,
327 "atomic_xchg" => this.atomic_exchange(args, dest, AtomicRWOp::SeqCst)?,
328 "atomic_xchg_acq" => this.atomic_exchange(args, dest, AtomicRWOp::Acquire)?,
329 "atomic_xchg_rel" => this.atomic_exchange(args, dest, AtomicRWOp::Release)?,
330 "atomic_xchg_acqrel" => this.atomic_exchange(args, dest, AtomicRWOp::AcqRel)?,
331 "atomic_xchg_relaxed" => this.atomic_exchange(args, dest, AtomicRWOp::Relaxed)?,
// compare_exchange: second ordering is the failure (load-only) ordering;
// the `_fail*` names set it explicitly, otherwise it is derived from the
// success ordering.
333 "atomic_cxchg" => this.atomic_compare_exchange(
334 args, dest, AtomicRWOp::SeqCst, AtomicReadOp::SeqCst
336 "atomic_cxchg_acq" => this.atomic_compare_exchange(
337 args, dest, AtomicRWOp::Acquire, AtomicReadOp::Acquire
339 "atomic_cxchg_rel" => this.atomic_compare_exchange(
340 args, dest, AtomicRWOp::Release, AtomicReadOp::Relaxed
342 "atomic_cxchg_acqrel" => this.atomic_compare_exchange
343 (args, dest, AtomicRWOp::AcqRel, AtomicReadOp::Acquire
345 "atomic_cxchg_relaxed" => this.atomic_compare_exchange(
346 args, dest, AtomicRWOp::Relaxed, AtomicReadOp::Relaxed
348 "atomic_cxchg_acq_failrelaxed" => this.atomic_compare_exchange(
349 args, dest, AtomicRWOp::Acquire, AtomicReadOp::Relaxed
351 "atomic_cxchg_acqrel_failrelaxed" => this.atomic_compare_exchange(
352 args, dest, AtomicRWOp::AcqRel, AtomicReadOp::Relaxed
354 "atomic_cxchg_failrelaxed" => this.atomic_compare_exchange(
355 args, dest, AtomicRWOp::SeqCst, AtomicReadOp::Relaxed
357 "atomic_cxchg_failacq" => this.atomic_compare_exchange(
358 args, dest, AtomicRWOp::SeqCst, AtomicReadOp::Acquire
// Weak variants share the helper (spurious failure is not modelled).
361 "atomic_cxchgweak" => this.atomic_compare_exchange_weak(
362 args, dest, AtomicRWOp::SeqCst, AtomicReadOp::SeqCst
364 "atomic_cxchgweak_acq" => this.atomic_compare_exchange_weak(
365 args, dest, AtomicRWOp::Acquire, AtomicReadOp::Acquire
367 "atomic_cxchgweak_rel" => this.atomic_compare_exchange_weak(
368 args, dest, AtomicRWOp::Release, AtomicReadOp::Relaxed
370 "atomic_cxchgweak_acqrel" => this.atomic_compare_exchange_weak(
371 args, dest, AtomicRWOp::AcqRel, AtomicReadOp::Acquire
373 "atomic_cxchgweak_relaxed" => this.atomic_compare_exchange_weak(
374 args, dest, AtomicRWOp::Relaxed, AtomicReadOp::Relaxed
376 "atomic_cxchgweak_acq_failrelaxed" => this.atomic_compare_exchange_weak(
377 args, dest, AtomicRWOp::Acquire, AtomicReadOp::Relaxed
379 "atomic_cxchgweak_acqrel_failrelaxed" => this.atomic_compare_exchange_weak(
380 args, dest, AtomicRWOp::AcqRel, AtomicReadOp::Relaxed
382 "atomic_cxchgweak_failrelaxed" => this.atomic_compare_exchange_weak(
383 args, dest, AtomicRWOp::SeqCst, AtomicReadOp::Relaxed
385 "atomic_cxchgweak_failacq" => this.atomic_compare_exchange_weak(
386 args, dest, AtomicRWOp::SeqCst, AtomicReadOp::Acquire
// Read-modify-write ops: `atomic_op(args, dest, binop, neg, ordering)`.
// `neg = true` only for nand (BitAnd result is then bitwise-negated).
389 "atomic_or" => this.atomic_op(args, dest, BinOp::BitOr, false, AtomicRWOp::SeqCst)?,
390 "atomic_or_acq" => this.atomic_op(args, dest, BinOp::BitOr, false, AtomicRWOp::Acquire)?,
391 "atomic_or_rel" => this.atomic_op(args, dest, BinOp::BitOr, false, AtomicRWOp::Release)?,
392 "atomic_or_acqrel" => this.atomic_op(args, dest, BinOp::BitOr, false, AtomicRWOp::AcqRel)?,
393 "atomic_or_relaxed" => this.atomic_op(args, dest, BinOp::BitOr, false, AtomicRWOp::Relaxed)?,
394 "atomic_xor" => this.atomic_op(args, dest, BinOp::BitXor, false, AtomicRWOp::SeqCst)?,
395 "atomic_xor_acq" => this.atomic_op(args, dest, BinOp::BitXor, false, AtomicRWOp::Acquire)?,
396 "atomic_xor_rel" => this.atomic_op(args, dest, BinOp::BitXor, false, AtomicRWOp::Release)?,
397 "atomic_xor_acqrel" => this.atomic_op(args, dest, BinOp::BitXor, false, AtomicRWOp::AcqRel)?,
398 "atomic_xor_relaxed" => this.atomic_op(args, dest, BinOp::BitXor, false, AtomicRWOp::Relaxed)?,
399 "atomic_and" => this.atomic_op(args, dest, BinOp::BitAnd, false, AtomicRWOp::SeqCst)?,
400 "atomic_and_acq" => this.atomic_op(args, dest, BinOp::BitAnd, false, AtomicRWOp::Acquire)?,
401 "atomic_and_rel" => this.atomic_op(args, dest, BinOp::BitAnd, false, AtomicRWOp::Release)?,
402 "atomic_and_acqrel" => this.atomic_op(args, dest, BinOp::BitAnd, false, AtomicRWOp::AcqRel)?,
403 "atomic_and_relaxed" => this.atomic_op(args, dest, BinOp::BitAnd, false, AtomicRWOp::Relaxed)?,
404 "atomic_nand" => this.atomic_op(args, dest, BinOp::BitAnd, true, AtomicRWOp::SeqCst)?,
405 "atomic_nand_acq" => this.atomic_op(args, dest, BinOp::BitAnd, true, AtomicRWOp::Acquire)?,
406 "atomic_nand_rel" => this.atomic_op(args, dest, BinOp::BitAnd, true, AtomicRWOp::Release)?,
407 "atomic_nand_acqrel" => this.atomic_op(args, dest, BinOp::BitAnd, true, AtomicRWOp::AcqRel)?,
408 "atomic_nand_relaxed" => this.atomic_op(args, dest, BinOp::BitAnd, true, AtomicRWOp::Relaxed)?,
409 "atomic_xadd" => this.atomic_op(args, dest, BinOp::Add, false, AtomicRWOp::SeqCst)?,
410 "atomic_xadd_acq" => this.atomic_op(args, dest, BinOp::Add, false, AtomicRWOp::Acquire)?,
411 "atomic_xadd_rel" => this.atomic_op(args, dest, BinOp::Add, false, AtomicRWOp::Release)?,
412 "atomic_xadd_acqrel" => this.atomic_op(args, dest, BinOp::Add, false, AtomicRWOp::AcqRel)?,
413 "atomic_xadd_relaxed" => this.atomic_op(args, dest, BinOp::Add, false, AtomicRWOp::Relaxed)?,
414 "atomic_xsub" => this.atomic_op(args, dest, BinOp::Sub, false, AtomicRWOp::SeqCst)?,
415 "atomic_xsub_acq" => this.atomic_op(args, dest, BinOp::Sub, false, AtomicRWOp::Acquire)?,
416 "atomic_xsub_rel" => this.atomic_op(args, dest, BinOp::Sub, false, AtomicRWOp::Release)?,
417 "atomic_xsub_acqrel" => this.atomic_op(args, dest, BinOp::Sub, false, AtomicRWOp::AcqRel)?,
418 "atomic_xsub_relaxed" => this.atomic_op(args, dest, BinOp::Sub, false, AtomicRWOp::Relaxed)?,
421 // Query type information
// `assert_zero_valid` / `assert_uninit_valid`: abort execution (not
// panic) when instantiating the type from zero/uninit bytes is invalid,
// because the caller may not be panic-safe.
423 "assert_zero_valid" |
424 "assert_uninit_valid" => {
425 let &[] = check_arg_count(args)?;
426 let ty = instance.substs.type_at(0);
427 let layout = this.layout_of(ty)?;
428 // Abort here because the caller might not be panic safe.
429 if layout.abi.is_uninhabited() {
430 throw_machine_stop!(TerminationInfo::Abort(Some(format!("attempted to instantiate uninhabited type `{}`", ty))))
// `might_permit_raw_init` is conservative; failing it means the
// zero/uninit pattern is definitely invalid for this layout.
432 if intrinsic_name == "assert_zero_valid" && !layout.might_permit_raw_init(this, /*zero:*/ true).unwrap() {
433 throw_machine_stop!(TerminationInfo::Abort(Some(format!("attempted to zero-initialize type `{}`, which is invalid", ty))))
435 if intrinsic_name == "assert_uninit_valid" && !layout.might_permit_raw_init(this, /*zero:*/ false).unwrap() {
436 throw_machine_stop!(TerminationInfo::Abort(Some(format!("attempted to leave type `{}` uninitialized, which is invalid", ty))))
// `exact_div`: division that is UB on remainder != 0; delegated to the
// shared interpreter helper.
442 let &[num, denom] = check_arg_count(args)?;
443 this.exact_div(this.read_immediate(num)?, this.read_immediate(denom)?, dest)?;
447 // We get an argument... and forget about it.
448 let &[_] = check_arg_count(args)?;
// `try` installs its own return handling, so return directly instead of
// falling through to the shared epilogue below.
451 "try" => return this.handle_try(args, dest, ret),
453 name => throw_unsup_format!("unimplemented intrinsic: {}", name),
// Common epilogue: trace the result and jump to the continuation block.
456 trace!("{:?}", this.dump_place(*dest));
457 this.go_to_block(ret);
// `atomic_load` (the `fn` line is not visible in this extract): reads the
// value behind the pointer operand with the given atomic read ordering and
// stores it into `dest`.
462 &mut self, args: &[OpTy<'tcx, Tag>], dest: PlaceTy<'tcx, Tag>,
464 ) -> InterpResult<'tcx> {
465 let this = self.eval_context_mut();
468 let &[place] = check_arg_count(args)?;
469 let place = this.deref_operand(place)?;
471 // make sure it fits into a scalar; otherwise it cannot be atomic
472 let val = this.read_scalar_atomic(place, atomic)?;
474 // Check alignment requirements. Atomics must always be aligned to their size,
475 // even if the type they wrap would be less aligned (e.g. AtomicU64 on 32bit must
// Size-based alignment: `Align::from_bytes` only fails on non-power-of-two
// sizes, which atomics never have.
477 let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
478 this.memory.check_ptr_access(place.ptr, place.layout.size, align)?;
479 this.write_scalar(val, dest)?;
// Stores `val` behind the pointer operand with the given atomic write
// ordering, after enforcing the size-based alignment atomics require.
483 fn atomic_store(&mut self, args: &[OpTy<'tcx, Tag>], atomic: AtomicWriteOp) -> InterpResult<'tcx> {
484 let this = self.eval_context_mut();
486 let &[place, val] = check_arg_count(args)?;
487 let place = this.deref_operand(place)?;
488 let val = this.read_scalar(val)?; // make sure it fits into a scalar; otherwise it cannot be atomic
490 // Check alignment requirements. Atomics must always be aligned to their size,
491 // even if the type they wrap would be less aligned (e.g. AtomicU64 on 32bit must
493 let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
494 this.memory.check_ptr_access(place.ptr, place.layout.size, align)?;
496 // Perform atomic store
497 this.write_scalar_atomic(val, place, atomic)?;
// Single-threaded ("compiler") fence: argument count is validated but the
// fence itself is a no-op for now (see FIXME).
501 fn compiler_fence(&mut self, args: &[OpTy<'tcx, Tag>], atomic: AtomicFenceOp) -> InterpResult<'tcx> {
502 let &[] = check_arg_count(args)?;
504 //FIXME: compiler fences are currently ignored
// Cross-thread fence: forwards to the data-race detector's fence
// validation with the requested ordering.
508 fn atomic_fence(&mut self, args: &[OpTy<'tcx, Tag>], atomic: AtomicFenceOp) -> InterpResult<'tcx> {
509 let this = self.eval_context_mut();
510 let &[] = check_arg_count(args)?;
511 this.validate_atomic_fence(atomic)?;
// `atomic_op` (the `fn` line is not visible in this extract): atomic
// read-modify-write. Applies `op` to the old value and `rhs`, bitwise-
// negating the result when `neg` is set (nand); returns the old value in
// `dest` and validates the access with the data-race detector.
516 &mut self, args: &[OpTy<'tcx, Tag>], dest: PlaceTy<'tcx, Tag>,
517 op: mir::BinOp, neg: bool, atomic: AtomicRWOp
518 ) -> InterpResult<'tcx> {
519 let this = self.eval_context_mut();
521 let &[place, rhs] = check_arg_count(args)?;
522 let place = this.deref_operand(place)?;
523 if !place.layout.ty.is_integral() {
524 bug!("Atomic arithmetic operations only work on integer types");
526 let rhs = this.read_immediate(rhs)?;
// The raw read/write pair is wrapped in `allow_data_races_mut`; race
// detection is handled separately by `validate_atomic_rmw` below.
527 let old = this.allow_data_races_mut(|this| {
528 this.read_immediate(place. into())
531 // Check alignment requirements. Atomics must always be aligned to their size,
532 // even if the type they wrap would be less aligned (e.g. AtomicU64 on 32bit must
534 let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
535 this.memory.check_ptr_access(place.ptr, place.layout.size, align)?;
536 this.write_immediate(*old, dest)?; // old value is returned
538 // Atomics wrap around on overflow.
539 let val = this.binary_op(op, old, rhs)?;
540 let val = if neg { this.unary_op(mir::UnOp::Not, val)? } else { val };
541 this.allow_data_races_mut(|this| {
542 this.write_immediate(*val, place.into())
545 this.validate_atomic_rmw(place, atomic)?;
// `atomic_exchange` (the `fn` line is not visible in this extract):
// unconditionally swaps in `new`, returning the old value in `dest`, then
// validates the access as an atomic RMW.
550 &mut self, args: &[OpTy<'tcx, Tag>], dest: PlaceTy<'tcx, Tag>, atomic: AtomicRWOp
551 ) -> InterpResult<'tcx> {
552 let this = self.eval_context_mut();
554 let &[place, new] = check_arg_count(args)?;
555 let place = this.deref_operand(place)?;
556 let new = this.read_scalar(new)?;
557 let old = this.allow_data_races_mut(|this| {
558 this.read_scalar(place.into())
561 // Check alignment requirements. Atomics must always be aligned to their size,
562 // even if the type they wrap would be less aligned (e.g. AtomicU64 on 32bit must
564 let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
565 this.memory.check_ptr_access(place.ptr, place.layout.size, align)?;
567 this.write_scalar(old, dest)?; // old value is returned
568 this.allow_data_races_mut(|this| {
569 this.write_scalar(new, place.into())
572 this.validate_atomic_rmw(place, atomic)?;
// Compare-and-swap: compares `*place` with `expect_old`, writes `new` on
// equality, and returns (old_value, success_flag) as a scalar pair.
// `success`/`fail` are the RMW and load orderings for the two outcomes.
576 fn atomic_compare_exchange(
577 &mut self, args: &[OpTy<'tcx, Tag>], dest: PlaceTy<'tcx, Tag>,
578 success: AtomicRWOp, fail: AtomicReadOp
579 ) -> InterpResult<'tcx> {
580 let this = self.eval_context_mut();
582 let &[place, expect_old, new] = check_arg_count(args)?;
583 let place = this.deref_operand(place)?;
584 let expect_old = this.read_immediate(expect_old)?; // read as immediate for the sake of `binary_op()`
585 let new = this.read_scalar(new)?;
587 // Failure ordering cannot be stronger than success ordering, therefore first attempt
588 // to read with the failure ordering and if successfull then try again with the success
589 // read ordering and write in the success case.
590 // Read as immediate for the sake of `binary_op()`
591 let old = this.allow_data_races_mut(|this| {
592 this.read_immediate(place.into())
595 // Check alignment requirements. Atomics must always be aligned to their size,
596 // even if the type they wrap would be less aligned (e.g. AtomicU64 on 32bit must
598 let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
599 this.memory.check_ptr_access(place.ptr, place.layout.size, align)?;
601 // `binary_op` will bail if either of them is not a scalar.
602 let eq = this.overflowing_binary_op(mir::BinOp::Eq, old, expect_old)?.0;
// Result mirrors `compare_exchange`'s (value, bool) return.
603 let res = Immediate::ScalarPair(old.to_scalar_or_uninit(), eq.into())
606 this.write_immediate(res, dest)?;
608 // Update ptr depending on comparison.
609 // if successful, perform a full rw-atomic validation
610 // otherwise treat this as an atomic load with the fail ordering
612 this.allow_data_races_mut(|this| {
613 this.write_scalar(new, place.into())
615 this.validate_atomic_rmw(place, success)?;
617 this.validate_atomic_load(place, fail)?;
// Weak CAS: permitted to fail spuriously per the language spec, but Miri
// does not model spurious failure, so it simply delegates to the strong
// compare-exchange.
623 fn atomic_compare_exchange_weak(
624 &mut self, args: &[OpTy<'tcx, Tag>], dest: PlaceTy<'tcx, Tag>,
625 success: AtomicRWOp, fail: AtomicReadOp
626 ) -> InterpResult<'tcx> {
628 // FIXME: the weak part of this is currently not modelled,
629 // it is assumed to always succeed unconditionally.
630 self.atomic_compare_exchange(args, dest, success, fail)
// Converts a soft-float `F` to the integer type `dest_ty`, reporting UB
// (rather than saturating) when the value is not exactly representable —
// this backs the `float_to_int_unchecked` intrinsic arm above.
633 fn float_to_int_unchecked<F>(
636 dest_ty: ty::Ty<'tcx>,
637 ) -> InterpResult<'tcx, Scalar<Tag>>
639 F: Float + Into<Scalar<Tag>>
641 let this = self.eval_context_ref();
643 // Step 1: cut off the fractional part of `f`. The result of this is
644 // guaranteed to be precisely representable in IEEE floats.
645 let f = f.round_to_integral(Round::TowardZero).value;
647 // Step 2: Cast the truncated float to the target integer type and see if we lose any information in this step.
648 Ok(match dest_ty.kind() {
// Unsigned destination: apfloat's `to_u128` reports precision loss or
// range overflow via status flags; any flag set means UB here.
651 let size = Integer::from_attr(this, attr::IntType::UnsignedInt(*t)).size();
652 let res = f.to_u128(size.bits_usize());
653 if res.status.is_empty() {
654 // No status flags means there was no further rounding or other loss of precision.
655 Scalar::from_uint(res.value, size)
657 // `f` was not representable in this integer type.
659 "`float_to_int_unchecked` intrinsic called on {} which cannot be represented in target type `{:?}`",
// Signed destination: same scheme with `to_i128`.
666 let size = Integer::from_attr(this, attr::IntType::SignedInt(*t)).size();
667 let res = f.to_i128(size.bits_usize());
668 if res.status.is_empty() {
669 // No status flags means there was no further rounding or other loss of precision.
670 Scalar::from_int(res.value, size)
672 // `f` was not representable in this integer type.
674 "`float_to_int_unchecked` intrinsic called on {} which cannot be represented in target type `{:?}`",
// Type checking guarantees an integer destination; anything else is a bug.
680 _ => bug!("`float_to_int_unchecked` called with non-int output type {:?}", dest_ty),