// src/shims/intrinsics.rs — Miri shims for Rust compiler intrinsics.
1 use std::iter;
2
3 use log::trace;
4
5 use rustc_attr as attr;
6 use rustc_ast::ast::FloatTy;
7 use rustc_middle::{mir, mir::BinOp, ty};
8 use rustc_middle::ty::layout::IntegerExt;
9 use rustc_apfloat::{Float, Round};
10 use rustc_target::abi::{Align, Integer, LayoutOf};
11
12 use crate::*;
13 use helpers::check_arg_count;
14
15 impl<'mir, 'tcx: 'mir> EvalContextExt<'mir, 'tcx> for crate::MiriEvalContext<'mir, 'tcx> {}
16 pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx> {
    /// Dispatches an intrinsic call that the shared CTFE engine does not
    /// handle itself.
    ///
    /// Intrinsics also available during CTFE are first delegated to
    /// `emulate_intrinsic`; everything else is matched by name below. Every
    /// intrinsic handled here must have a return place (`ret`); diverging
    /// intrinsics are rejected as unsupported. `_unwind` is unused: none of
    /// these intrinsics unwind.
    fn call_intrinsic(
        &mut self,
        instance: ty::Instance<'tcx>,
        args: &[OpTy<'tcx, Tag>],
        ret: Option<(PlaceTy<'tcx, Tag>, mir::BasicBlock)>,
        _unwind: Option<mir::BasicBlock>,
    ) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();

        // Intrinsics shared with CTFE are emulated by the interpreter core;
        // if that handled the call, we are done.
        if this.emulate_intrinsic(instance, args, ret)? {
            return Ok(());
        }

        // All supported intrinsics have a return place.
        let intrinsic_name = &*this.tcx.item_name(instance.def_id()).as_str();
        let (dest, ret) = match ret {
            None => throw_unsup_format!("unimplemented (diverging) intrinsic: {}", intrinsic_name),
            Some(p) => p,
        };

        // Then handle terminating intrinsics.
        match intrinsic_name {
            // Miri overwriting CTFE intrinsics.
            // Unlike CTFE, Miri can always decide pointer (in)equality, so
            // these are answered with a plain comparison.
            "ptr_guaranteed_eq" => {
                let &[left, right] = check_arg_count(args)?;
                let left = this.read_immediate(left)?;
                let right = this.read_immediate(right)?;
                this.binop_ignore_overflow(mir::BinOp::Eq, left, right, dest)?;
            }
            "ptr_guaranteed_ne" => {
                let &[left, right] = check_arg_count(args)?;
                let left = this.read_immediate(left)?;
                let right = this.read_immediate(right)?;
                this.binop_ignore_overflow(mir::BinOp::Ne, left, right, dest)?;
            }

            // Raw memory accesses
            #[rustfmt::skip]
            | "copy"
            | "copy_nonoverlapping"
            => {
                // NB: this `dest` shadows the outer return place; these
                // intrinsics return `()` so nothing is written there.
                let &[src, dest, count] = check_arg_count(args)?;
                let elem_ty = instance.substs.type_at(0);
                let elem_layout = this.layout_of(elem_ty)?;
                let count = this.read_scalar(count)?.to_machine_usize(this)?;
                let elem_align = elem_layout.align.abi;

                // `elem_size * count` overflowing is UB for these intrinsics.
                let size = elem_layout.size.checked_mul(count, this)
                    .ok_or_else(|| err_ub_format!("overflow computing total size of `{}`", intrinsic_name))?;
                let src = this.read_scalar(src)?.check_init()?;
                let src = this.memory.check_ptr_access(src, size, elem_align)?;
                let dest = this.read_scalar(dest)?.check_init()?;
                let dest = this.memory.check_ptr_access(dest, size, elem_align)?;

                // `check_ptr_access` returns `None` for zero-sized accesses,
                // in which case there is nothing to copy.
                if let (Some(src), Some(dest)) = (src, dest) {
                    this.memory.copy(
                        src,
                        dest,
                        size,
                        // Only `copy_nonoverlapping` makes overlap UB.
                        intrinsic_name.ends_with("_nonoverlapping"),
                    )?;
                }
            }

            "move_val_init" => {
                // Confusing binding names: the second argument (bound as
                // `dest` here) is the *value* to store into the dereferenced
                // first argument (`copy_op` takes (src, dest)).
                let &[place, dest] = check_arg_count(args)?;
                let place = this.deref_operand(place)?;
                this.copy_op(dest, place.into())?;
            }

            "volatile_load" => {
                // Miri has no notion of volatility; this is a plain load.
                let &[place] = check_arg_count(args)?;
                let place = this.deref_operand(place)?;
                this.copy_op(place.into(), dest)?;
            }
            "volatile_store" => {
                // As in `move_val_init`, the operand bound as `dest` is the
                // value being stored; volatility is not modelled.
                let &[place, dest] = check_arg_count(args)?;
                let place = this.deref_operand(place)?;
                this.copy_op(dest, place.into())?;
            }

            "write_bytes" => {
                let &[ptr, val_byte, count] = check_arg_count(args)?;
                let ty = instance.substs.type_at(0);
                let ty_layout = this.layout_of(ty)?;
                let val_byte = this.read_scalar(val_byte)?.to_u8()?;
                let ptr = this.read_scalar(ptr)?.check_init()?;
                let count = this.read_scalar(count)?.to_machine_usize(this)?;
                // `size_of::<T>() * count` overflowing is UB.
                let byte_count = ty_layout.size.checked_mul(count, this)
                    .ok_or_else(|| err_ub_format!("overflow computing total size of `write_bytes`"))?;
                this.memory
                    .write_bytes(ptr, iter::repeat(val_byte).take(byte_count.bytes() as usize))?;
            }

            // Floating-point operations
            // Unary f32 math functions, implemented by transmuting to a host
            // `f32` and calling the host's libm (see FIXME below).
            #[rustfmt::skip]
            | "sinf32"
            | "fabsf32"
            | "cosf32"
            | "sqrtf32"
            | "expf32"
            | "exp2f32"
            | "logf32"
            | "log10f32"
            | "log2f32"
            | "floorf32"
            | "ceilf32"
            | "truncf32"
            | "roundf32"
            => {
                let &[f] = check_arg_count(args)?;
                // FIXME: Using host floats.
                let f = f32::from_bits(this.read_scalar(f)?.to_u32()?);
                let f = match intrinsic_name {
                    "sinf32" => f.sin(),
                    "fabsf32" => f.abs(),
                    "cosf32" => f.cos(),
                    "sqrtf32" => f.sqrt(),
                    "expf32" => f.exp(),
                    "exp2f32" => f.exp2(),
                    "logf32" => f.ln(),
                    "log10f32" => f.log10(),
                    "log2f32" => f.log2(),
                    "floorf32" => f.floor(),
                    "ceilf32" => f.ceil(),
                    "truncf32" => f.trunc(),
                    "roundf32" => f.round(),
                    _ => bug!(),
                };
                this.write_scalar(Scalar::from_u32(f.to_bits()), dest)?;
            }

            // Unary f64 math functions; same host-float caveat as above.
            #[rustfmt::skip]
            | "sinf64"
            | "fabsf64"
            | "cosf64"
            | "sqrtf64"
            | "expf64"
            | "exp2f64"
            | "logf64"
            | "log10f64"
            | "log2f64"
            | "floorf64"
            | "ceilf64"
            | "truncf64"
            | "roundf64"
            => {
                let &[f] = check_arg_count(args)?;
                // FIXME: Using host floats.
                let f = f64::from_bits(this.read_scalar(f)?.to_u64()?);
                let f = match intrinsic_name {
                    "sinf64" => f.sin(),
                    "fabsf64" => f.abs(),
                    "cosf64" => f.cos(),
                    "sqrtf64" => f.sqrt(),
                    "expf64" => f.exp(),
                    "exp2f64" => f.exp2(),
                    "logf64" => f.ln(),
                    "log10f64" => f.log10(),
                    "log2f64" => f.log2(),
                    "floorf64" => f.floor(),
                    "ceilf64" => f.ceil(),
                    "truncf64" => f.trunc(),
                    "roundf64" => f.round(),
                    _ => bug!(),
                };
                this.write_scalar(Scalar::from_u64(f.to_bits()), dest)?;
            }

            // "Fast" float arithmetic: Miri evaluates these as the ordinary
            // precise operations (the fast-math UB conditions are not checked
            // here).
            #[rustfmt::skip]
            | "fadd_fast"
            | "fsub_fast"
            | "fmul_fast"
            | "fdiv_fast"
            | "frem_fast"
            => {
                let &[a, b] = check_arg_count(args)?;
                let a = this.read_immediate(a)?;
                let b = this.read_immediate(b)?;
                let op = match intrinsic_name {
                    "fadd_fast" => mir::BinOp::Add,
                    "fsub_fast" => mir::BinOp::Sub,
                    "fmul_fast" => mir::BinOp::Mul,
                    "fdiv_fast" => mir::BinOp::Div,
                    "frem_fast" => mir::BinOp::Rem,
                    _ => bug!(),
                };
                this.binop_ignore_overflow(op, a, b, dest)?;
            }

            // Binary f32 ops computed via soft-float (`rustc_apfloat`), so no
            // host-float caveat applies here.
            #[rustfmt::skip]
            | "minnumf32"
            | "maxnumf32"
            | "copysignf32"
            => {
                let &[a, b] = check_arg_count(args)?;
                let a = this.read_scalar(a)?.to_f32()?;
                let b = this.read_scalar(b)?.to_f32()?;
                let res = match intrinsic_name {
                    "minnumf32" => a.min(b),
                    "maxnumf32" => a.max(b),
                    "copysignf32" => a.copy_sign(b),
                    _ => bug!(),
                };
                this.write_scalar(Scalar::from_f32(res), dest)?;
            }

            // Binary f64 ops, also via soft-float.
            #[rustfmt::skip]
            | "minnumf64"
            | "maxnumf64"
            | "copysignf64"
            => {
                let &[a, b] = check_arg_count(args)?;
                let a = this.read_scalar(a)?.to_f64()?;
                let b = this.read_scalar(b)?.to_f64()?;
                let res = match intrinsic_name {
                    "minnumf64" => a.min(b),
                    "maxnumf64" => a.max(b),
                    "copysignf64" => a.copy_sign(b),
                    _ => bug!(),
                };
                this.write_scalar(Scalar::from_f64(res), dest)?;
            }

            "powf32" => {
                let &[f, f2] = check_arg_count(args)?;
                // FIXME: Using host floats.
                let f = f32::from_bits(this.read_scalar(f)?.to_u32()?);
                let f2 = f32::from_bits(this.read_scalar(f2)?.to_u32()?);
                this.write_scalar(Scalar::from_u32(f.powf(f2).to_bits()), dest)?;
            }

            "powf64" => {
                let &[f, f2] = check_arg_count(args)?;
                // FIXME: Using host floats.
                let f = f64::from_bits(this.read_scalar(f)?.to_u64()?);
                let f2 = f64::from_bits(this.read_scalar(f2)?.to_u64()?);
                this.write_scalar(Scalar::from_u64(f.powf(f2).to_bits()), dest)?;
            }

            // Fused multiply-add (a * b + c), soft-float.
            "fmaf32" => {
                let &[a, b, c] = check_arg_count(args)?;
                let a = this.read_scalar(a)?.to_f32()?;
                let b = this.read_scalar(b)?.to_f32()?;
                let c = this.read_scalar(c)?.to_f32()?;
                let res = a.mul_add(b, c).value;
                this.write_scalar(Scalar::from_f32(res), dest)?;
            }

            "fmaf64" => {
                let &[a, b, c] = check_arg_count(args)?;
                let a = this.read_scalar(a)?.to_f64()?;
                let b = this.read_scalar(b)?.to_f64()?;
                let c = this.read_scalar(c)?.to_f64()?;
                let res = a.mul_add(b, c).value;
                this.write_scalar(Scalar::from_f64(res), dest)?;
            }

            // Float raised to an i32 power.
            "powif32" => {
                let &[f, i] = check_arg_count(args)?;
                // FIXME: Using host floats.
                let f = f32::from_bits(this.read_scalar(f)?.to_u32()?);
                let i = this.read_scalar(i)?.to_i32()?;
                this.write_scalar(Scalar::from_u32(f.powi(i).to_bits()), dest)?;
            }

            "powif64" => {
                let &[f, i] = check_arg_count(args)?;
                // FIXME: Using host floats.
                let f = f64::from_bits(this.read_scalar(f)?.to_u64()?);
                let i = this.read_scalar(i)?.to_i32()?;
                this.write_scalar(Scalar::from_u64(f.powi(i).to_bits()), dest)?;
            }

            // Float-to-int cast that is UB when the value does not fit; the
            // heavy lifting is in the `float_to_int_unchecked` helper below.
            "float_to_int_unchecked" => {
                let &[val] = check_arg_count(args)?;
                let val = this.read_immediate(val)?;

                let res = match val.layout.ty.kind() {
                    ty::Float(FloatTy::F32) => {
                        this.float_to_int_unchecked(val.to_scalar()?.to_f32()?, dest.layout.ty)?
                    }
                    ty::Float(FloatTy::F64) => {
                        this.float_to_int_unchecked(val.to_scalar()?.to_f64()?, dest.layout.ty)?
                    }
                    _ => bug!("`float_to_int_unchecked` called with non-float input type {:?}", val.layout.ty),
                };

                this.write_scalar(res, dest)?;
            }

            // Atomic operations
            // Suffix convention: `_acq` = Acquire, `_rel` = Release,
            // `_acqrel` = AcqRel, `_relaxed` = Relaxed, no suffix = SeqCst;
            // `fail*`/second ordering is the failure ordering for
            // compare-exchange.
            "atomic_load" => this.atomic_load(args, dest, AtomicReadOp::SeqCst)?,
            "atomic_load_relaxed" => this.atomic_load(args, dest, AtomicReadOp::Relaxed)?,
            "atomic_load_acq" => this.atomic_load(args, dest, AtomicReadOp::Acquire)?,

            "atomic_store" => this.atomic_store(args, AtomicWriteOp::SeqCst)?,
            "atomic_store_relaxed" => this.atomic_store(args, AtomicWriteOp::Relaxed)?,
            "atomic_store_rel" => this.atomic_store(args, AtomicWriteOp::Release)?,

            "atomic_fence_acq" => this.atomic_fence(args, AtomicFenceOp::Acquire)?,
            "atomic_fence_rel" => this.atomic_fence(args, AtomicFenceOp::Release)?,
            "atomic_fence_acqrel" => this.atomic_fence(args, AtomicFenceOp::AcqRel)?,
            "atomic_fence" => this.atomic_fence(args, AtomicFenceOp::SeqCst)?,

            "atomic_singlethreadfence_acq" => this.compiler_fence(args, AtomicFenceOp::Acquire)?,
            "atomic_singlethreadfence_rel" => this.compiler_fence(args, AtomicFenceOp::Release)?,
            "atomic_singlethreadfence_acqrel" => this.compiler_fence(args, AtomicFenceOp::AcqRel)?,
            "atomic_singlethreadfence" => this.compiler_fence(args, AtomicFenceOp::SeqCst)?,

            "atomic_xchg" => this.atomic_exchange(args, dest, AtomicRWOp::SeqCst)?,
            "atomic_xchg_acq" => this.atomic_exchange(args, dest, AtomicRWOp::Acquire)?,
            "atomic_xchg_rel" => this.atomic_exchange(args, dest, AtomicRWOp::Release)?,
            "atomic_xchg_acqrel" => this.atomic_exchange(args, dest, AtomicRWOp::AcqRel)?,
            "atomic_xchg_relaxed" => this.atomic_exchange(args, dest, AtomicRWOp::Relaxed)?,

            "atomic_cxchg" => this.atomic_compare_exchange(
                args, dest, AtomicRWOp::SeqCst, AtomicReadOp::SeqCst
            )?,
            "atomic_cxchg_acq" => this.atomic_compare_exchange(
                args, dest, AtomicRWOp::Acquire, AtomicReadOp::Acquire
            )?,
            "atomic_cxchg_rel" => this.atomic_compare_exchange(
                args, dest, AtomicRWOp::Release, AtomicReadOp::Relaxed
            )?,
            "atomic_cxchg_acqrel" => this.atomic_compare_exchange(
                args, dest, AtomicRWOp::AcqRel, AtomicReadOp::Acquire
            )?,
            "atomic_cxchg_relaxed" => this.atomic_compare_exchange(
                args, dest, AtomicRWOp::Relaxed, AtomicReadOp::Relaxed
            )?,
            "atomic_cxchg_acq_failrelaxed" => this.atomic_compare_exchange(
                args, dest, AtomicRWOp::Acquire, AtomicReadOp::Relaxed
            )?,
            "atomic_cxchg_acqrel_failrelaxed" => this.atomic_compare_exchange(
                args, dest, AtomicRWOp::AcqRel, AtomicReadOp::Relaxed
            )?,
            "atomic_cxchg_failrelaxed" => this.atomic_compare_exchange(
                args, dest, AtomicRWOp::SeqCst, AtomicReadOp::Relaxed
            )?,
            "atomic_cxchg_failacq" => this.atomic_compare_exchange(
                args, dest, AtomicRWOp::SeqCst, AtomicReadOp::Acquire
            )?,

            "atomic_cxchgweak" => this.atomic_compare_exchange_weak(
                args, dest, AtomicRWOp::SeqCst, AtomicReadOp::SeqCst
            )?,
            "atomic_cxchgweak_acq" => this.atomic_compare_exchange_weak(
                args, dest, AtomicRWOp::Acquire, AtomicReadOp::Acquire
            )?,
            "atomic_cxchgweak_rel" => this.atomic_compare_exchange_weak(
                args, dest, AtomicRWOp::Release, AtomicReadOp::Relaxed
            )?,
            "atomic_cxchgweak_acqrel" => this.atomic_compare_exchange_weak(
                args, dest, AtomicRWOp::AcqRel, AtomicReadOp::Acquire
            )?,
            "atomic_cxchgweak_relaxed" => this.atomic_compare_exchange_weak(
                args, dest, AtomicRWOp::Relaxed, AtomicReadOp::Relaxed
            )?,
            "atomic_cxchgweak_acq_failrelaxed" => this.atomic_compare_exchange_weak(
                args, dest, AtomicRWOp::Acquire, AtomicReadOp::Relaxed
            )?,
            "atomic_cxchgweak_acqrel_failrelaxed" => this.atomic_compare_exchange_weak(
                args, dest, AtomicRWOp::AcqRel, AtomicReadOp::Relaxed
            )?,
            "atomic_cxchgweak_failrelaxed" => this.atomic_compare_exchange_weak(
                args, dest, AtomicRWOp::SeqCst, AtomicReadOp::Relaxed
            )?,
            "atomic_cxchgweak_failacq" => this.atomic_compare_exchange_weak(
                args, dest, AtomicRWOp::SeqCst, AtomicReadOp::Acquire
            )?,

            // Read-modify-write ops; the `bool` argument asks `atomic_op` to
            // bitwise-negate the result (only used for nand = !(a & b)).
            "atomic_or" => this.atomic_op(args, dest, BinOp::BitOr, false, AtomicRWOp::SeqCst)?,
            "atomic_or_acq" => this.atomic_op(args, dest, BinOp::BitOr, false, AtomicRWOp::Acquire)?,
            "atomic_or_rel" => this.atomic_op(args, dest, BinOp::BitOr, false, AtomicRWOp::Release)?,
            "atomic_or_acqrel" => this.atomic_op(args, dest, BinOp::BitOr, false, AtomicRWOp::AcqRel)?,
            "atomic_or_relaxed" => this.atomic_op(args, dest, BinOp::BitOr, false, AtomicRWOp::Relaxed)?,
            "atomic_xor" => this.atomic_op(args, dest, BinOp::BitXor, false, AtomicRWOp::SeqCst)?,
            "atomic_xor_acq" => this.atomic_op(args, dest, BinOp::BitXor, false, AtomicRWOp::Acquire)?,
            "atomic_xor_rel" => this.atomic_op(args, dest, BinOp::BitXor, false, AtomicRWOp::Release)?,
            "atomic_xor_acqrel" => this.atomic_op(args, dest, BinOp::BitXor, false, AtomicRWOp::AcqRel)?,
            "atomic_xor_relaxed" => this.atomic_op(args, dest, BinOp::BitXor, false, AtomicRWOp::Relaxed)?,
            "atomic_and" => this.atomic_op(args, dest, BinOp::BitAnd, false, AtomicRWOp::SeqCst)?,
            "atomic_and_acq" => this.atomic_op(args, dest, BinOp::BitAnd, false, AtomicRWOp::Acquire)?,
            "atomic_and_rel" => this.atomic_op(args, dest, BinOp::BitAnd, false, AtomicRWOp::Release)?,
            "atomic_and_acqrel" => this.atomic_op(args, dest, BinOp::BitAnd, false, AtomicRWOp::AcqRel)?,
            "atomic_and_relaxed" => this.atomic_op(args, dest, BinOp::BitAnd, false, AtomicRWOp::Relaxed)?,
            "atomic_nand" => this.atomic_op(args, dest, BinOp::BitAnd, true, AtomicRWOp::SeqCst)?,
            "atomic_nand_acq" => this.atomic_op(args, dest, BinOp::BitAnd, true, AtomicRWOp::Acquire)?,
            "atomic_nand_rel" => this.atomic_op(args, dest, BinOp::BitAnd, true, AtomicRWOp::Release)?,
            "atomic_nand_acqrel" => this.atomic_op(args, dest, BinOp::BitAnd, true, AtomicRWOp::AcqRel)?,
            "atomic_nand_relaxed" => this.atomic_op(args, dest, BinOp::BitAnd, true, AtomicRWOp::Relaxed)?,
            "atomic_xadd" => this.atomic_op(args, dest, BinOp::Add, false, AtomicRWOp::SeqCst)?,
            "atomic_xadd_acq" => this.atomic_op(args, dest, BinOp::Add, false, AtomicRWOp::Acquire)?,
            "atomic_xadd_rel" => this.atomic_op(args, dest, BinOp::Add, false, AtomicRWOp::Release)?,
            "atomic_xadd_acqrel" => this.atomic_op(args, dest, BinOp::Add, false, AtomicRWOp::AcqRel)?,
            "atomic_xadd_relaxed" => this.atomic_op(args, dest, BinOp::Add, false, AtomicRWOp::Relaxed)?,
            "atomic_xsub" => this.atomic_op(args, dest, BinOp::Sub, false, AtomicRWOp::SeqCst)?,
            "atomic_xsub_acq" => this.atomic_op(args, dest, BinOp::Sub, false, AtomicRWOp::Acquire)?,
            "atomic_xsub_rel" => this.atomic_op(args, dest, BinOp::Sub, false, AtomicRWOp::Release)?,
            "atomic_xsub_acqrel" => this.atomic_op(args, dest, BinOp::Sub, false, AtomicRWOp::AcqRel)?,
            "atomic_xsub_relaxed" => this.atomic_op(args, dest, BinOp::Sub, false, AtomicRWOp::Relaxed)?,

            // Query type information
            "assert_inhabited" |
            "assert_zero_valid" |
            "assert_uninit_valid" => {
                let &[] = check_arg_count(args)?;
                let ty = instance.substs.type_at(0);
                let layout = this.layout_of(ty)?;
                // Abort here because the caller might not be panic safe.
                if layout.abi.is_uninhabited() {
                    throw_machine_stop!(TerminationInfo::Abort(Some(format!("attempted to instantiate uninhabited type `{}`", ty))))
                }
                if intrinsic_name == "assert_zero_valid" && !layout.might_permit_raw_init(this, /*zero:*/ true).unwrap() {
                    throw_machine_stop!(TerminationInfo::Abort(Some(format!("attempted to zero-initialize type `{}`, which is invalid", ty))))
                }
                if intrinsic_name == "assert_uninit_valid" && !layout.might_permit_raw_init(this, /*zero:*/ false).unwrap() {
                    throw_machine_stop!(TerminationInfo::Abort(Some(format!("attempted to leave type `{}` uninitialized, which is invalid", ty))))
                }
            }

            // Other
            "exact_div" => {
                let &[num, denom] = check_arg_count(args)?;
                this.exact_div(this.read_immediate(num)?, this.read_immediate(denom)?, dest)?;
            }

            "forget" => {
                // We get an argument... and forget about it.
                let &[_] = check_arg_count(args)?;
            }

            // `try` manages its own control flow, so return instead of
            // falling through to `go_to_block` below.
            "try" => return this.handle_try(args, dest, ret),

            name => throw_unsup_format!("unimplemented intrinsic: {}", name),
        }

        trace!("{:?}", this.dump_place(*dest));
        this.go_to_block(ret);
        Ok(())
    }
460
461     fn atomic_load(
462         &mut self, args: &[OpTy<'tcx, Tag>], dest: PlaceTy<'tcx, Tag>,
463         atomic: AtomicReadOp
464     ) -> InterpResult<'tcx> {
465         let this = self.eval_context_mut();
466
467
468         let &[place] = check_arg_count(args)?;
469         let place = this.deref_operand(place)?;
470
471         // make sure it fits into a scalar; otherwise it cannot be atomic
472         let val = this.read_scalar_atomic(place, atomic)?;
473
474         // Check alignment requirements. Atomics must always be aligned to their size,
475         // even if the type they wrap would be less aligned (e.g. AtomicU64 on 32bit must
476         // be 8-aligned).
477         let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
478         this.memory.check_ptr_access(place.ptr, place.layout.size, align)?;
479         this.write_scalar(val, dest)?;
480         Ok(())
481     }
482
483     fn atomic_store(&mut self, args: &[OpTy<'tcx, Tag>], atomic: AtomicWriteOp) -> InterpResult<'tcx> {
484         let this = self.eval_context_mut();
485
486         let &[place, val] = check_arg_count(args)?;
487         let place = this.deref_operand(place)?;
488         let val = this.read_scalar(val)?; // make sure it fits into a scalar; otherwise it cannot be atomic
489
490         // Check alignment requirements. Atomics must always be aligned to their size,
491         // even if the type they wrap would be less aligned (e.g. AtomicU64 on 32bit must
492         // be 8-aligned).
493         let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
494         this.memory.check_ptr_access(place.ptr, place.layout.size, align)?;
495
496         // Perform atomic store
497         this.write_scalar_atomic(val, place, atomic)?;
498         Ok(())
499     }
500
501     fn compiler_fence(&mut self, args: &[OpTy<'tcx, Tag>], atomic: AtomicFenceOp) -> InterpResult<'tcx> {
502         let &[] = check_arg_count(args)?;
503         let _ = atomic;
504         //FIXME: compiler fences are currently ignored
505         Ok(())
506     }
507
    /// Shim for the `atomic_fence*` intrinsics: takes no arguments and runs
    /// the data-race model's fence validation with the given ordering.
    fn atomic_fence(&mut self, args: &[OpTy<'tcx, Tag>], atomic: AtomicFenceOp) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();
        let &[] = check_arg_count(args)?;
        this.validate_atomic_fence(atomic)?;
        Ok(())
    }
514
515     fn atomic_op(
516         &mut self, args: &[OpTy<'tcx, Tag>], dest: PlaceTy<'tcx, Tag>,
517         op: mir::BinOp, neg: bool, atomic: AtomicRWOp
518     ) -> InterpResult<'tcx> {
519         let this = self.eval_context_mut();
520
521         let &[place, rhs] = check_arg_count(args)?;
522         let place = this.deref_operand(place)?;
523         if !place.layout.ty.is_integral() {
524             bug!("Atomic arithmetic operations only work on integer types");
525         }
526         let rhs = this.read_immediate(rhs)?;
527         let old = this.allow_data_races_mut(|this| {
528             this.read_immediate(place. into())
529         })?;
530
531         // Check alignment requirements. Atomics must always be aligned to their size,
532         // even if the type they wrap would be less aligned (e.g. AtomicU64 on 32bit must
533         // be 8-aligned).
534         let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
535         this.memory.check_ptr_access(place.ptr, place.layout.size, align)?;
536         this.write_immediate(*old, dest)?; // old value is returned
537
538         // Atomics wrap around on overflow.
539         let val = this.binary_op(op, old, rhs)?;
540         let val = if neg { this.unary_op(mir::UnOp::Not, val)? } else { val };
541         this.allow_data_races_mut(|this| {
542             this.write_immediate(*val, place.into())
543         })?;
544
545         this.validate_atomic_rmw(place, atomic)?;
546         Ok(())
547     }
548     
549     fn atomic_exchange(
550         &mut self, args: &[OpTy<'tcx, Tag>], dest: PlaceTy<'tcx, Tag>, atomic: AtomicRWOp
551     ) -> InterpResult<'tcx> {
552         let this = self.eval_context_mut();
553
554         let &[place, new] = check_arg_count(args)?;
555         let place = this.deref_operand(place)?;
556         let new = this.read_scalar(new)?;
557         let old = this.allow_data_races_mut(|this| {
558             this.read_scalar(place.into())
559         })?;
560
561         // Check alignment requirements. Atomics must always be aligned to their size,
562         // even if the type they wrap would be less aligned (e.g. AtomicU64 on 32bit must
563         // be 8-aligned).
564         let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
565         this.memory.check_ptr_access(place.ptr, place.layout.size, align)?;
566
567         this.write_scalar(old, dest)?; // old value is returned
568         this.allow_data_races_mut(|this| {
569             this.write_scalar(new, place.into())
570         })?;
571
572         this.validate_atomic_rmw(place, atomic)?;
573         Ok(())
574     }
575
    /// Shim for the `atomic_cxchg*` intrinsics: compares the value behind the
    /// pointer with `expect_old` and, on equality, stores `new`. Writes the
    /// pair `(old_value, comparison_succeeded)` to `dest`, matching the
    /// intrinsic's return type.
    ///
    /// `success` is the read-write ordering used when the exchange happens;
    /// `fail` is the (read-only) ordering used when it does not.
    fn atomic_compare_exchange(
        &mut self, args: &[OpTy<'tcx, Tag>], dest: PlaceTy<'tcx, Tag>,
        success: AtomicRWOp, fail: AtomicReadOp
    ) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();

        let &[place, expect_old, new] = check_arg_count(args)?;
        let place = this.deref_operand(place)?;
        let expect_old = this.read_immediate(expect_old)?; // read as immediate for the sake of `binary_op()`
        let new = this.read_scalar(new)?;

        // Failure ordering cannot be stronger than success ordering, therefore first attempt
        //  to read with the failure ordering and if successful then try again with the success
        //  read ordering and write in the success case.
        // Read as immediate for the sake of `binary_op()`
        let old = this.allow_data_races_mut(|this| {
            this.read_immediate(place.into())
        })?;

        // Check alignment requirements. Atomics must always be aligned to their size,
        // even if the type they wrap would be less aligned (e.g. AtomicU64 on 32bit must
        // be 8-aligned).
        let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
        this.memory.check_ptr_access(place.ptr, place.layout.size, align)?;

        // `binary_op` will bail if either of them is not a scalar.
        let eq = this.overflowing_binary_op(mir::BinOp::Eq, old, expect_old)?.0;
        let res = Immediate::ScalarPair(old.to_scalar_or_uninit(), eq.into());

        // Return old value.
        this.write_immediate(res, dest)?;

        // Update ptr depending on comparison.
        //  if successful, perform a full rw-atomic validation
        //  otherwise treat this as an atomic load with the fail ordering
        if eq.to_bool()? {
            this.allow_data_races_mut(|this| {
                this.write_scalar(new, place.into())
            })?;
            this.validate_atomic_rmw(place, success)?;
        } else {
            this.validate_atomic_load(place, fail)?;
        }

        Ok(())
    }
622
    /// Shim for the `atomic_cxchgweak*` intrinsics.
    fn atomic_compare_exchange_weak(
        &mut self, args: &[OpTy<'tcx, Tag>], dest: PlaceTy<'tcx, Tag>,
        success: AtomicRWOp, fail: AtomicReadOp
    ) -> InterpResult<'tcx> {

        // FIXME: the weak part of this is currently not modelled,
        //  it is assumed to always succeed unconditionally.
        self.atomic_compare_exchange(args, dest, success, fail)
    }
632
633     fn float_to_int_unchecked<F>(
634         &self,
635         f: F,
636         dest_ty: ty::Ty<'tcx>,
637     ) -> InterpResult<'tcx, Scalar<Tag>>
638     where
639         F: Float + Into<Scalar<Tag>>
640     {
641         let this = self.eval_context_ref();
642
643         // Step 1: cut off the fractional part of `f`. The result of this is
644         // guaranteed to be precisely representable in IEEE floats.
645         let f = f.round_to_integral(Round::TowardZero).value;
646
647         // Step 2: Cast the truncated float to the target integer type and see if we lose any information in this step.
648         Ok(match dest_ty.kind() {
649             // Unsigned
650             ty::Uint(t) => {
651                 let size = Integer::from_attr(this, attr::IntType::UnsignedInt(*t)).size();
652                 let res = f.to_u128(size.bits_usize());
653                 if res.status.is_empty() {
654                     // No status flags means there was no further rounding or other loss of precision.
655                     Scalar::from_uint(res.value, size)
656                 } else {
657                     // `f` was not representable in this integer type.
658                     throw_ub_format!(
659                         "`float_to_int_unchecked` intrinsic called on {} which cannot be represented in target type `{:?}`",
660                         f, dest_ty,
661                     );
662                 }
663             }
664             // Signed
665             ty::Int(t) => {
666                 let size = Integer::from_attr(this, attr::IntType::SignedInt(*t)).size();
667                 let res = f.to_i128(size.bits_usize());
668                 if res.status.is_empty() {
669                     // No status flags means there was no further rounding or other loss of precision.
670                     Scalar::from_int(res.value, size)
671                 } else {
672                     // `f` was not representable in this integer type.
673                     throw_ub_format!(
674                         "`float_to_int_unchecked` intrinsic called on {} which cannot be represented in target type `{:?}`",
675                         f, dest_ty,
676                     );
677                 }
678             }
679             // Nothing else
680             _ => bug!("`float_to_int_unchecked` called with non-int output type {:?}", dest_ty),
681         })
682     }
683 }