1 use rustc_apfloat::Float;
3 use rustc::mir::interpret::{InterpResult, PointerArithmetic};
4 use rustc::ty::layout::{self, LayoutOf, Size};
8 PlaceTy, OpTy, ImmTy, Immediate, Scalar, Tag,
// Marker impl: wires the `EvalContextExt` extension trait (defined below with
// default method bodies) onto the concrete Miri evaluation context, so its
// intrinsic-emulation methods are callable on `MiriEvalContext` directly.
impl<'mir, 'tcx> EvalContextExt<'mir, 'tcx> for crate::MiriEvalContext<'mir, 'tcx> {}
13 pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx> {
16 instance: ty::Instance<'tcx>,
17 args: &[OpTy<'tcx, Tag>],
18 dest: PlaceTy<'tcx, Tag>,
19 ) -> InterpResult<'tcx> {
20 let this = self.eval_context_mut();
21 if this.emulate_intrinsic(instance, args, dest)? {
24 let tcx = &{this.tcx.tcx};
25 let substs = instance.substs;
27 // All these intrinsics take raw pointers, so if we access memory directly
28 // (as opposed to through a place), we have to remember to erase any tag
29 // that might still hang around!
31 let intrinsic_name = this.tcx.item_name(instance.def_id()).as_str();
32 match intrinsic_name.get() {
34 let offset = this.read_scalar(args[1])?.to_isize(this)?;
35 let ptr = this.read_scalar(args[0])?.not_undef()?;
37 let pointee_ty = substs.type_at(0);
38 let pointee_size = this.layout_of(pointee_ty)?.size.bytes() as i64;
39 let offset = offset.overflowing_mul(pointee_size).0;
40 let result_ptr = ptr.ptr_wrapping_signed_offset(offset, this);
41 this.write_scalar(result_ptr, dest)?;
45 let cond = this.read_scalar(args[0])?.to_bool()?;
47 throw_unsup!(AssumptionNotHeld);
52 "atomic_load_relaxed" |
53 "atomic_load_acq" => {
54 let ptr = this.deref_operand(args[0])?;
55 let val = this.read_scalar(ptr.into())?; // make sure it fits into a scalar; otherwise it cannot be atomic
56 this.write_scalar(val, dest)?;
60 let ptr = this.deref_operand(args[0])?;
61 this.copy_op(ptr.into(), dest)?;
65 "atomic_store_relaxed" |
66 "atomic_store_rel" => {
67 let ptr = this.deref_operand(args[0])?;
68 let val = this.read_scalar(args[1])?; // make sure it fits into a scalar; otherwise it cannot be atomic
69 this.write_scalar(val, ptr.into())?;
73 let ptr = this.deref_operand(args[0])?;
74 this.copy_op(args[1], ptr.into())?;
77 "atomic_fence_acq" => {
78 // we are inherently singlethreaded and singlecored, this is a nop
81 _ if intrinsic_name.starts_with("atomic_xchg") => {
82 let ptr = this.deref_operand(args[0])?;
83 let new = this.read_scalar(args[1])?;
84 let old = this.read_scalar(ptr.into())?;
85 this.write_scalar(old, dest)?; // old value is returned
86 this.write_scalar(new, ptr.into())?;
89 _ if intrinsic_name.starts_with("atomic_cxchg") => {
90 let ptr = this.deref_operand(args[0])?;
91 let expect_old = this.read_immediate(args[1])?; // read as immediate for the sake of `binary_op()`
92 let new = this.read_scalar(args[2])?;
93 let old = this.read_immediate(ptr.into())?; // read as immediate for the sake of `binary_op()`
94 // binary_op will bail if either of them is not a scalar
95 let (eq, _) = this.binary_op(mir::BinOp::Eq, old, expect_old)?;
96 let res = Immediate::ScalarPair(old.to_scalar_or_undef(), eq.into());
97 this.write_immediate(res, dest)?; // old value is returned
98 // update ptr depending on comparison
100 this.write_scalar(new, ptr.into())?;
108 "atomic_or_relaxed" |
112 "atomic_xor_acqrel" |
113 "atomic_xor_relaxed" |
117 "atomic_and_acqrel" |
118 "atomic_and_relaxed" |
122 "atomic_nand_acqrel" |
123 "atomic_nand_relaxed" |
127 "atomic_xadd_acqrel" |
128 "atomic_xadd_relaxed" |
132 "atomic_xsub_acqrel" |
133 "atomic_xsub_relaxed" => {
134 let ptr = this.deref_operand(args[0])?;
135 if !ptr.layout.ty.is_integral() {
136 throw_unsup!(Unimplemented(format!("Atomic arithmetic operations only work on integer types")));
138 let rhs = this.read_immediate(args[1])?;
139 let old = this.read_immediate(ptr.into())?;
140 this.write_immediate(*old, dest)?; // old value is returned
141 let (op, neg) = match intrinsic_name.split('_').nth(1).unwrap() {
142 "or" => (mir::BinOp::BitOr, false),
143 "xor" => (mir::BinOp::BitXor, false),
144 "and" => (mir::BinOp::BitAnd, false),
145 "xadd" => (mir::BinOp::Add, false),
146 "xsub" => (mir::BinOp::Sub, false),
147 "nand" => (mir::BinOp::BitAnd, true),
150 // Atomics wrap around on overflow.
151 let (val, _overflowed) = this.binary_op(op, old, rhs)?;
153 this.unary_op(mir::UnOp::Not, ImmTy::from_scalar(val, old.layout))?
157 this.write_scalar(val, ptr.into())?;
160 "breakpoint" => unimplemented!(), // halt miri
163 "copy_nonoverlapping" => {
164 let elem_ty = substs.type_at(0);
165 let elem_layout = this.layout_of(elem_ty)?;
166 let elem_size = elem_layout.size.bytes();
167 let count = this.read_scalar(args[2])?.to_usize(this)?;
168 let elem_align = elem_layout.align.abi;
170 let size = Size::from_bytes(count * elem_size);
171 let src = this.read_scalar(args[0])?.not_undef()?;
172 let src = this.memory().check_ptr_access(src, size, elem_align)?;
173 let dest = this.read_scalar(args[1])?.not_undef()?;
174 let dest = this.memory().check_ptr_access(dest, size, elem_align)?;
176 if let (Some(src), Some(dest)) = (src, dest) {
177 this.memory_mut().copy(
181 intrinsic_name.ends_with("_nonoverlapping"),
186 "discriminant_value" => {
187 let place = this.deref_operand(args[0])?;
188 let discr_val = this.read_discriminant(place.into())?.0;
189 this.write_scalar(Scalar::from_uint(discr_val, dest.layout.size), dest)?;
192 "sinf32" | "fabsf32" | "cosf32" | "sqrtf32" | "expf32" | "exp2f32" | "logf32" |
193 "log10f32" | "log2f32" | "floorf32" | "ceilf32" | "truncf32" => {
194 // FIXME: Using host floats.
195 let f = f32::from_bits(this.read_scalar(args[0])?.to_u32()?);
196 let f = match intrinsic_name.get() {
198 "fabsf32" => f.abs(),
200 "sqrtf32" => f.sqrt(),
202 "exp2f32" => f.exp2(),
204 "log10f32" => f.log10(),
205 "log2f32" => f.log2(),
206 "floorf32" => f.floor(),
207 "ceilf32" => f.ceil(),
208 "truncf32" => f.trunc(),
211 this.write_scalar(Scalar::from_u32(f.to_bits()), dest)?;
214 "sinf64" | "fabsf64" | "cosf64" | "sqrtf64" | "expf64" | "exp2f64" | "logf64" |
215 "log10f64" | "log2f64" | "floorf64" | "ceilf64" | "truncf64" => {
216 // FIXME: Using host floats.
217 let f = f64::from_bits(this.read_scalar(args[0])?.to_u64()?);
218 let f = match intrinsic_name.get() {
220 "fabsf64" => f.abs(),
222 "sqrtf64" => f.sqrt(),
224 "exp2f64" => f.exp2(),
226 "log10f64" => f.log10(),
227 "log2f64" => f.log2(),
228 "floorf64" => f.floor(),
229 "ceilf64" => f.ceil(),
230 "truncf64" => f.trunc(),
233 this.write_scalar(Scalar::from_u64(f.to_bits()), dest)?;
236 "fadd_fast" | "fsub_fast" | "fmul_fast" | "fdiv_fast" | "frem_fast" => {
237 let a = this.read_immediate(args[0])?;
238 let b = this.read_immediate(args[1])?;
239 let op = match intrinsic_name.get() {
240 "fadd_fast" => mir::BinOp::Add,
241 "fsub_fast" => mir::BinOp::Sub,
242 "fmul_fast" => mir::BinOp::Mul,
243 "fdiv_fast" => mir::BinOp::Div,
244 "frem_fast" => mir::BinOp::Rem,
247 this.binop_ignore_overflow(op, a, b, dest)?;
250 "minnumf32" | "maxnumf32" => {
251 let a = this.read_scalar(args[0])?.to_f32()?;
252 let b = this.read_scalar(args[1])?.to_f32()?;
253 let res = if intrinsic_name.get().starts_with("min") {
258 this.write_scalar(Scalar::from_f32(res), dest)?;
261 "minnumf64" | "maxnumf64" => {
262 let a = this.read_scalar(args[0])?.to_f64()?;
263 let b = this.read_scalar(args[1])?.to_f64()?;
264 let res = if intrinsic_name.get().starts_with("min") {
269 this.write_scalar(Scalar::from_f64(res), dest)?;
273 // Performs an exact division, resulting in undefined behavior where
274 // `x % y != 0` or `y == 0` or `x == T::min_value() && y == -1`
275 let a = this.read_immediate(args[0])?;
276 let b = this.read_immediate(args[1])?;
278 if this.binary_op(mir::BinOp::Rem, a, b)?.0.to_bits(dest.layout.size)? != 0 {
279 // Check if `b` is -1, which is the "min_value / -1" case.
280 let minus1 = Scalar::from_int(-1, dest.layout.size);
281 return Err(if b.to_scalar().unwrap() == minus1 {
282 err_unsup!(Intrinsic(format!("exact_div: result of dividing MIN by -1 cannot be represented")))
284 err_unsup!(Intrinsic(format!("exact_div: {:?} cannot be divided by {:?} without remainder", *a, *b)))
287 this.binop_ignore_overflow(mir::BinOp::Div, a, b, dest)?;
292 "likely" | "unlikely" => {
293 // These just return their argument
294 let b = this.read_immediate(args[0])?;
295 this.write_immediate(*b, dest)?;
299 // Check fast path: we don't want to force an allocation in case the destination is a simple value,
300 // but we also do not want to create a new allocation with 0s and then copy that over.
301 // FIXME: We do not properly validate in case of ZSTs and when doing it in memory!
302 // However, this only affects direct calls of the intrinsic; calls to the stable
303 // functions wrapping them do get their validation.
304 // FIXME: should we check that the destination pointer is aligned even for ZSTs?
305 if !dest.layout.is_zst() {
306 match dest.layout.abi {
307 layout::Abi::Scalar(ref s) => {
308 let x = Scalar::from_int(0, s.value.size(this));
309 this.write_scalar(x, dest)?;
311 layout::Abi::ScalarPair(ref s1, ref s2) => {
312 let x = Scalar::from_int(0, s1.value.size(this));
313 let y = Scalar::from_int(0, s2.value.size(this));
314 this.write_immediate(Immediate::ScalarPair(x.into(), y.into()), dest)?;
318 let mplace = this.force_allocation(dest)?;
319 assert!(mplace.meta.is_none());
320 // not a zst, must be valid pointer
321 let ptr = mplace.ptr.to_ptr()?;
322 this.memory_mut().get_mut(ptr.alloc_id)?.write_repeat(tcx, ptr, 0, dest.layout.size)?;
329 let ty = substs.type_at(0);
330 let layout = this.layout_of(ty)?;
331 let align = layout.align.pref.bytes();
332 let ptr_size = this.pointer_size();
333 let align_val = Scalar::from_uint(align as u128, ptr_size);
334 this.write_scalar(align_val, dest)?;
338 let ptr = this.deref_operand(args[0])?;
339 this.copy_op(args[1], ptr.into())?;
343 let offset = this.read_scalar(args[1])?.to_isize(this)?;
344 let ptr = this.read_scalar(args[0])?.not_undef()?;
345 let result_ptr = this.pointer_offset_inbounds(ptr, substs.type_at(0), offset)?;
346 this.write_scalar(result_ptr, dest)?;
349 "panic_if_uninhabited" => {
350 let ty = substs.type_at(0);
351 let layout = this.layout_of(ty)?;
352 if layout.abi.is_uninhabited() {
353 throw_unsup!(Intrinsic(format!("Trying to instantiate uninhabited type {}", ty)))
358 // FIXME: Using host floats.
359 let f = f32::from_bits(this.read_scalar(args[0])?.to_u32()?);
360 let f2 = f32::from_bits(this.read_scalar(args[1])?.to_u32()?);
362 Scalar::from_u32(f.powf(f2).to_bits()),
368 // FIXME: Using host floats.
369 let f = f64::from_bits(this.read_scalar(args[0])?.to_u64()?);
370 let f2 = f64::from_bits(this.read_scalar(args[1])?.to_u64()?);
372 Scalar::from_u64(f.powf(f2).to_bits()),
378 let a = this.read_scalar(args[0])?.to_f32()?;
379 let b = this.read_scalar(args[1])?.to_f32()?;
380 let c = this.read_scalar(args[2])?.to_f32()?;
381 let res = a.mul_add(b, c).value;
383 Scalar::from_f32(res),
389 let a = this.read_scalar(args[0])?.to_f64()?;
390 let b = this.read_scalar(args[1])?.to_f64()?;
391 let c = this.read_scalar(args[2])?.to_f64()?;
392 let res = a.mul_add(b, c).value;
394 Scalar::from_f64(res),
400 // FIXME: Using host floats.
401 let f = f32::from_bits(this.read_scalar(args[0])?.to_u32()?);
402 let i = this.read_scalar(args[1])?.to_i32()?;
404 Scalar::from_u32(f.powi(i).to_bits()),
410 // FIXME: Using host floats.
411 let f = f64::from_bits(this.read_scalar(args[0])?.to_u64()?);
412 let i = this.read_scalar(args[1])?.to_i32()?;
414 Scalar::from_u64(f.powi(i).to_bits()),
420 let mplace = this.deref_operand(args[0])?;
421 let (size, _) = this.size_and_align_of_mplace(mplace)?
422 .expect("size_of_val called on extern type");
423 let ptr_size = this.pointer_size();
425 Scalar::from_uint(size.bytes() as u128, ptr_size),
432 let mplace = this.deref_operand(args[0])?;
433 let (_, align) = this.size_and_align_of_mplace(mplace)?
434 .expect("size_of_val called on extern type");
435 let ptr_size = this.pointer_size();
437 Scalar::from_uint(align.bytes(), ptr_size),
443 let l = this.read_immediate(args[0])?;
444 let r = this.read_immediate(args[1])?;
445 let rval = r.to_scalar()?.to_bits(args[1].layout.size)?;
447 throw_unsup!(Intrinsic(format!("Division by 0 in unchecked_div")));
449 this.binop_ignore_overflow(
458 let l = this.read_immediate(args[0])?;
459 let r = this.read_immediate(args[1])?;
460 let rval = r.to_scalar()?.to_bits(args[1].layout.size)?;
462 throw_unsup!(Intrinsic(format!("Division by 0 in unchecked_rem")));
464 this.binop_ignore_overflow(
472 "unchecked_add" | "unchecked_sub" | "unchecked_mul" => {
473 let l = this.read_immediate(args[0])?;
474 let r = this.read_immediate(args[1])?;
475 let op = match intrinsic_name.get() {
476 "unchecked_add" => mir::BinOp::Add,
477 "unchecked_sub" => mir::BinOp::Sub,
478 "unchecked_mul" => mir::BinOp::Mul,
481 let (res, overflowed) = this.binary_op(op, l, r)?;
483 throw_unsup!(Intrinsic(format!("Overflowing arithmetic in {}", intrinsic_name.get())));
485 this.write_scalar(res, dest)?;
489 let ty = substs.type_at(0);
490 let ty_layout = this.layout_of(ty)?;
491 let val_byte = this.read_scalar(args[1])?.to_u8()?;
492 let ptr = this.read_scalar(args[0])?.not_undef()?;
493 let count = this.read_scalar(args[2])?.to_usize(this)?;
494 let byte_count = ty_layout.size * count;
495 match this.memory().check_ptr_access(ptr, byte_count, ty_layout.align.abi)? {
498 .get_mut(ptr.alloc_id)?
499 .write_repeat(tcx, ptr, val_byte, byte_count)?;
502 // Size is 0, nothing to do.
507 name => throw_unsup!(Unimplemented(format!("unimplemented intrinsic: {}", name))),