1 use rustc_apfloat::Float;
3 use rustc::mir::interpret::{InterpResult, PointerArithmetic};
4 use rustc::ty::layout::{self, LayoutOf, Size, Align};
8 PlaceTy, OpTy, ImmTy, Immediate, Scalar, Tag,
// Blanket impl: `MiriEvalContext` gets every method of the extension trait
// below through the trait's provided (default) method bodies, so the impl
// block itself is intentionally empty.
12 impl<'mir, 'tcx> EvalContextExt<'mir, 'tcx> for crate::MiriEvalContext<'mir, 'tcx> {}
13 pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx> {
16 instance: ty::Instance<'tcx>,
17 args: &[OpTy<'tcx, Tag>],
18 dest: PlaceTy<'tcx, Tag>,
19 ) -> InterpResult<'tcx> {
20 let this = self.eval_context_mut();
21 if this.emulate_intrinsic(instance, args, dest)? {
24 let tcx = &{this.tcx.tcx};
25 let substs = instance.substs;
27 // All these intrinsics take raw pointers, so if we access memory directly
28 // (as opposed to through a place), we have to remember to erase any tag
29 // that might still hang around!
31 let intrinsic_name = this.tcx.item_name(instance.def_id()).as_str();
32 match intrinsic_name.get() {
34 let offset = this.read_scalar(args[1])?.to_isize(this)?;
35 let ptr = this.read_scalar(args[0])?.not_undef()?;
37 let pointee_ty = substs.type_at(0);
38 let pointee_size = this.layout_of(pointee_ty)?.size.bytes() as i64;
39 let offset = offset.overflowing_mul(pointee_size).0;
40 let result_ptr = ptr.ptr_wrapping_signed_offset(offset, this);
41 this.write_scalar(result_ptr, dest)?;
45 let cond = this.read_scalar(args[0])?.to_bool()?;
47 throw_ub_format!("`assume` intrinsic called with `false`");
52 let place = this.deref_operand(args[0])?;
53 this.copy_op(place.into(), dest)?;
57 let place = this.deref_operand(args[0])?;
58 this.copy_op(args[1], place.into())?;
62 "atomic_load_relaxed" |
63 "atomic_load_acq" => {
64 let place = this.deref_operand(args[0])?;
65 let val = this.read_scalar(place.into())?; // make sure it fits into a scalar; otherwise it cannot be atomic
67 // Check alignment requirements. Atomics must always be aligned to their size,
68 // even if the type they wrap would be less aligned (e.g. AtomicU64 on 32bit must
70 let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
71 this.memory().check_ptr_access(place.ptr, place.layout.size, align)?;
73 this.write_scalar(val, dest)?;
77 "atomic_store_relaxed" |
78 "atomic_store_rel" => {
79 let place = this.deref_operand(args[0])?;
80 let val = this.read_scalar(args[1])?; // make sure it fits into a scalar; otherwise it cannot be atomic
82 // Check alignment requirements. Atomics must always be aligned to their size,
83 // even if the type they wrap would be less aligned (e.g. AtomicU64 on 32bit must
85 let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
86 this.memory().check_ptr_access(place.ptr, place.layout.size, align)?;
88 this.write_scalar(val, place.into())?;
91 "atomic_fence_acq" => {
92 // we are inherently singlethreaded and singlecored, this is a nop
95 _ if intrinsic_name.starts_with("atomic_xchg") => {
96 let place = this.deref_operand(args[0])?;
97 let new = this.read_scalar(args[1])?;
98 let old = this.read_scalar(place.into())?;
100 // Check alignment requirements. Atomics must always be aligned to their size,
101 // even if the type they wrap would be less aligned (e.g. AtomicU64 on 32bit must
103 let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
104 this.memory().check_ptr_access(place.ptr, place.layout.size, align)?;
106 this.write_scalar(old, dest)?; // old value is returned
107 this.write_scalar(new, place.into())?;
110 _ if intrinsic_name.starts_with("atomic_cxchg") => {
111 let place = this.deref_operand(args[0])?;
112 let expect_old = this.read_immediate(args[1])?; // read as immediate for the sake of `binary_op()`
113 let new = this.read_scalar(args[2])?;
114 let old = this.read_immediate(place.into())?; // read as immediate for the sake of `binary_op()`
116 // Check alignment requirements. Atomics must always be aligned to their size,
117 // even if the type they wrap would be less aligned (e.g. AtomicU64 on 32bit must
119 let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
120 this.memory().check_ptr_access(place.ptr, place.layout.size, align)?;
122 // binary_op will bail if either of them is not a scalar
123 let (eq, _) = this.binary_op(mir::BinOp::Eq, old, expect_old)?;
124 let res = Immediate::ScalarPair(old.to_scalar_or_undef(), eq.into());
125 this.write_immediate(res, dest)?; // old value is returned
126 // update ptr depending on comparison
128 this.write_scalar(new, place.into())?;
136 "atomic_or_relaxed" |
140 "atomic_xor_acqrel" |
141 "atomic_xor_relaxed" |
145 "atomic_and_acqrel" |
146 "atomic_and_relaxed" |
150 "atomic_nand_acqrel" |
151 "atomic_nand_relaxed" |
155 "atomic_xadd_acqrel" |
156 "atomic_xadd_relaxed" |
160 "atomic_xsub_acqrel" |
161 "atomic_xsub_relaxed" => {
162 let place = this.deref_operand(args[0])?;
163 if !place.layout.ty.is_integral() {
164 bug!("Atomic arithmetic operations only work on integer types");
166 let rhs = this.read_immediate(args[1])?;
167 let old = this.read_immediate(place.into())?;
169 // Check alignment requirements. Atomics must always be aligned to their size,
170 // even if the type they wrap would be less aligned (e.g. AtomicU64 on 32bit must
172 let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
173 this.memory().check_ptr_access(place.ptr, place.layout.size, align)?;
175 this.write_immediate(*old, dest)?; // old value is returned
176 let (op, neg) = match intrinsic_name.split('_').nth(1).unwrap() {
177 "or" => (mir::BinOp::BitOr, false),
178 "xor" => (mir::BinOp::BitXor, false),
179 "and" => (mir::BinOp::BitAnd, false),
180 "xadd" => (mir::BinOp::Add, false),
181 "xsub" => (mir::BinOp::Sub, false),
182 "nand" => (mir::BinOp::BitAnd, true),
185 // Atomics wrap around on overflow.
186 let (val, _overflowed) = this.binary_op(op, old, rhs)?;
188 this.unary_op(mir::UnOp::Not, ImmTy::from_scalar(val, old.layout))?
192 this.write_scalar(val, place.into())?;
195 "breakpoint" => unimplemented!(), // halt miri
198 "copy_nonoverlapping" => {
199 let elem_ty = substs.type_at(0);
200 let elem_layout = this.layout_of(elem_ty)?;
201 let elem_size = elem_layout.size.bytes();
202 let count = this.read_scalar(args[2])?.to_usize(this)?;
203 let elem_align = elem_layout.align.abi;
205 let size = Size::from_bytes(count * elem_size);
206 let src = this.read_scalar(args[0])?.not_undef()?;
207 let src = this.memory().check_ptr_access(src, size, elem_align)?;
208 let dest = this.read_scalar(args[1])?.not_undef()?;
209 let dest = this.memory().check_ptr_access(dest, size, elem_align)?;
211 if let (Some(src), Some(dest)) = (src, dest) {
212 this.memory_mut().copy(
216 intrinsic_name.ends_with("_nonoverlapping"),
221 "discriminant_value" => {
222 let place = this.deref_operand(args[0])?;
223 let discr_val = this.read_discriminant(place.into())?.0;
224 this.write_scalar(Scalar::from_uint(discr_val, dest.layout.size), dest)?;
227 "sinf32" | "fabsf32" | "cosf32" | "sqrtf32" | "expf32" | "exp2f32" | "logf32" |
228 "log10f32" | "log2f32" | "floorf32" | "ceilf32" | "truncf32" | "roundf32" => {
229 // FIXME: Using host floats.
230 let f = f32::from_bits(this.read_scalar(args[0])?.to_u32()?);
231 let f = match intrinsic_name.get() {
233 "fabsf32" => f.abs(),
235 "sqrtf32" => f.sqrt(),
237 "exp2f32" => f.exp2(),
239 "log10f32" => f.log10(),
240 "log2f32" => f.log2(),
241 "floorf32" => f.floor(),
242 "ceilf32" => f.ceil(),
243 "truncf32" => f.trunc(),
244 "roundf32" => f.round(),
247 this.write_scalar(Scalar::from_u32(f.to_bits()), dest)?;
250 "sinf64" | "fabsf64" | "cosf64" | "sqrtf64" | "expf64" | "exp2f64" | "logf64" |
251 "log10f64" | "log2f64" | "floorf64" | "ceilf64" | "truncf64" | "roundf64" => {
252 // FIXME: Using host floats.
253 let f = f64::from_bits(this.read_scalar(args[0])?.to_u64()?);
254 let f = match intrinsic_name.get() {
256 "fabsf64" => f.abs(),
258 "sqrtf64" => f.sqrt(),
260 "exp2f64" => f.exp2(),
262 "log10f64" => f.log10(),
263 "log2f64" => f.log2(),
264 "floorf64" => f.floor(),
265 "ceilf64" => f.ceil(),
266 "truncf64" => f.trunc(),
267 "roundf64" => f.round(),
270 this.write_scalar(Scalar::from_u64(f.to_bits()), dest)?;
273 "fadd_fast" | "fsub_fast" | "fmul_fast" | "fdiv_fast" | "frem_fast" => {
274 let a = this.read_immediate(args[0])?;
275 let b = this.read_immediate(args[1])?;
276 let op = match intrinsic_name.get() {
277 "fadd_fast" => mir::BinOp::Add,
278 "fsub_fast" => mir::BinOp::Sub,
279 "fmul_fast" => mir::BinOp::Mul,
280 "fdiv_fast" => mir::BinOp::Div,
281 "frem_fast" => mir::BinOp::Rem,
284 this.binop_ignore_overflow(op, a, b, dest)?;
287 "minnumf32" | "maxnumf32" => {
288 let a = this.read_scalar(args[0])?.to_f32()?;
289 let b = this.read_scalar(args[1])?.to_f32()?;
290 let res = if intrinsic_name.get().starts_with("min") {
295 this.write_scalar(Scalar::from_f32(res), dest)?;
298 "minnumf64" | "maxnumf64" => {
299 let a = this.read_scalar(args[0])?.to_f64()?;
300 let b = this.read_scalar(args[1])?.to_f64()?;
301 let res = if intrinsic_name.get().starts_with("min") {
306 this.write_scalar(Scalar::from_f64(res), dest)?;
310 // Performs an exact division, resulting in undefined behavior where
311 // `x % y != 0` or `y == 0` or `x == T::min_value() && y == -1`
312 let a = this.read_immediate(args[0])?;
313 let b = this.read_immediate(args[1])?;
315 if this.binary_op(mir::BinOp::Rem, a, b)?.0.to_bits(dest.layout.size)? != 0 {
316 // Check if `b` is -1, which is the "min_value / -1" case.
317 let minus1 = Scalar::from_int(-1, dest.layout.size);
318 return Err(if b.to_scalar().unwrap() == minus1 {
319 err_ub_format!("exact_div: result of dividing MIN by -1 cannot be represented")
321 err_ub_format!("exact_div: {:?} cannot be divided by {:?} without remainder", *a, *b)
324 this.binop_ignore_overflow(mir::BinOp::Div, a, b, dest)?;
329 "likely" | "unlikely" => {
330 // These just return their argument
331 let b = this.read_immediate(args[0])?;
332 this.write_immediate(*b, dest)?;
336 // Check fast path: we don't want to force an allocation in case the destination is a simple value,
337 // but we also do not want to create a new allocation with 0s and then copy that over.
338 // FIXME: We do not properly validate in case of ZSTs and when doing it in memory!
339 // However, this only affects direct calls of the intrinsic; calls to the stable
340 // functions wrapping them do get their validation.
341 // FIXME: should we check that the destination pointer is aligned even for ZSTs?
342 if !dest.layout.is_zst() {
343 match dest.layout.abi {
344 layout::Abi::Scalar(ref s) => {
345 let x = Scalar::from_int(0, s.value.size(this));
346 this.write_scalar(x, dest)?;
348 layout::Abi::ScalarPair(ref s1, ref s2) => {
349 let x = Scalar::from_int(0, s1.value.size(this));
350 let y = Scalar::from_int(0, s2.value.size(this));
351 this.write_immediate(Immediate::ScalarPair(x.into(), y.into()), dest)?;
355 let mplace = this.force_allocation(dest)?;
356 assert!(mplace.meta.is_none());
357 // not a zst, must be valid pointer
358 let ptr = mplace.ptr.to_ptr()?;
359 this.memory_mut().get_mut(ptr.alloc_id)?.write_repeat(tcx, ptr, 0, dest.layout.size)?;
366 let ty = substs.type_at(0);
367 let layout = this.layout_of(ty)?;
368 let align = layout.align.pref.bytes();
369 let ptr_size = this.pointer_size();
370 let align_val = Scalar::from_uint(align as u128, ptr_size);
371 this.write_scalar(align_val, dest)?;
375 let place = this.deref_operand(args[0])?;
376 this.copy_op(args[1], place.into())?;
380 let offset = this.read_scalar(args[1])?.to_isize(this)?;
381 let ptr = this.read_scalar(args[0])?.not_undef()?;
382 let result_ptr = this.pointer_offset_inbounds(ptr, substs.type_at(0), offset)?;
383 this.write_scalar(result_ptr, dest)?;
386 "panic_if_uninhabited" => {
387 let ty = substs.type_at(0);
388 let layout = this.layout_of(ty)?;
389 if layout.abi.is_uninhabited() {
390 throw_ub_format!("Trying to instantiate uninhabited type {}", ty)
395 // FIXME: Using host floats.
396 let f = f32::from_bits(this.read_scalar(args[0])?.to_u32()?);
397 let f2 = f32::from_bits(this.read_scalar(args[1])?.to_u32()?);
399 Scalar::from_u32(f.powf(f2).to_bits()),
405 // FIXME: Using host floats.
406 let f = f64::from_bits(this.read_scalar(args[0])?.to_u64()?);
407 let f2 = f64::from_bits(this.read_scalar(args[1])?.to_u64()?);
409 Scalar::from_u64(f.powf(f2).to_bits()),
415 let a = this.read_scalar(args[0])?.to_f32()?;
416 let b = this.read_scalar(args[1])?.to_f32()?;
417 let c = this.read_scalar(args[2])?.to_f32()?;
418 let res = a.mul_add(b, c).value;
420 Scalar::from_f32(res),
426 let a = this.read_scalar(args[0])?.to_f64()?;
427 let b = this.read_scalar(args[1])?.to_f64()?;
428 let c = this.read_scalar(args[2])?.to_f64()?;
429 let res = a.mul_add(b, c).value;
431 Scalar::from_f64(res),
437 // FIXME: Using host floats.
438 let f = f32::from_bits(this.read_scalar(args[0])?.to_u32()?);
439 let i = this.read_scalar(args[1])?.to_i32()?;
441 Scalar::from_u32(f.powi(i).to_bits()),
447 // FIXME: Using host floats.
448 let f = f64::from_bits(this.read_scalar(args[0])?.to_u64()?);
449 let i = this.read_scalar(args[1])?.to_i32()?;
451 Scalar::from_u64(f.powi(i).to_bits()),
457 let mplace = this.deref_operand(args[0])?;
458 let (size, _) = this.size_and_align_of_mplace(mplace)?
459 .expect("size_of_val called on extern type");
460 let ptr_size = this.pointer_size();
462 Scalar::from_uint(size.bytes() as u128, ptr_size),
469 let mplace = this.deref_operand(args[0])?;
470 let (_, align) = this.size_and_align_of_mplace(mplace)?
471 .expect("size_of_val called on extern type");
472 let ptr_size = this.pointer_size();
474 Scalar::from_uint(align.bytes(), ptr_size),
480 let l = this.read_immediate(args[0])?;
481 let r = this.read_immediate(args[1])?;
482 let rval = r.to_scalar()?.to_bits(args[1].layout.size)?;
484 throw_ub_format!("Division by 0 in unchecked_div");
486 this.binop_ignore_overflow(
495 let l = this.read_immediate(args[0])?;
496 let r = this.read_immediate(args[1])?;
497 let rval = r.to_scalar()?.to_bits(args[1].layout.size)?;
499 throw_ub_format!("Division by 0 in unchecked_rem");
501 this.binop_ignore_overflow(
509 "unchecked_add" | "unchecked_sub" | "unchecked_mul" => {
510 let l = this.read_immediate(args[0])?;
511 let r = this.read_immediate(args[1])?;
512 let op = match intrinsic_name.get() {
513 "unchecked_add" => mir::BinOp::Add,
514 "unchecked_sub" => mir::BinOp::Sub,
515 "unchecked_mul" => mir::BinOp::Mul,
518 let (res, overflowed) = this.binary_op(op, l, r)?;
520 throw_ub_format!("Overflowing arithmetic in {}", intrinsic_name.get());
522 this.write_scalar(res, dest)?;
526 // Check fast path: we don't want to force an allocation in case the destination is a simple value,
527 // but we also do not want to create a new allocation with 0s and then copy that over.
528 // FIXME: We do not properly validate in case of ZSTs and when doing it in memory!
529 // However, this only affects direct calls of the intrinsic; calls to the stable
530 // functions wrapping them do get their validation.
531 // FIXME: should we check alignment for ZSTs?
532 use crate::ScalarMaybeUndef;
533 if !dest.layout.is_zst() {
534 match dest.layout.abi {
535 layout::Abi::Scalar(..) => {
536 let x = ScalarMaybeUndef::Undef;
537 this.write_immediate(Immediate::Scalar(x), dest)?;
539 layout::Abi::ScalarPair(..) => {
540 let x = ScalarMaybeUndef::Undef;
541 this.write_immediate(Immediate::ScalarPair(x, x), dest)?;
545 let mplace = this.force_allocation(dest)?;
546 assert!(mplace.meta.is_none());
547 let ptr = mplace.ptr.to_ptr()?;
549 .get_mut(ptr.alloc_id)?
550 .mark_definedness(ptr, dest.layout.size, false);
557 let ty = substs.type_at(0);
558 let ty_layout = this.layout_of(ty)?;
559 let val_byte = this.read_scalar(args[1])?.to_u8()?;
560 let ptr = this.read_scalar(args[0])?.not_undef()?;
561 let count = this.read_scalar(args[2])?.to_usize(this)?;
562 let byte_count = ty_layout.size * count;
563 match this.memory().check_ptr_access(ptr, byte_count, ty_layout.align.abi)? {
566 .get_mut(ptr.alloc_id)?
567 .write_repeat(tcx, ptr, val_byte, byte_count)?;
570 // Size is 0, nothing to do.
575 name => throw_unsup_format!("unimplemented intrinsic: {}", name),