use rustc_apfloat::Float;
use rustc::mir;
use rustc::mir::interpret::{InterpResult, PointerArithmetic};
use rustc::ty::layout::{self, LayoutOf, Size, Align};
use rustc::ty;
use crate::{
    PlaceTy, OpTy, Immediate, Scalar, Tag,
};

impl<'mir, 'tcx> EvalContextExt<'mir, 'tcx> for crate::MiriEvalContext<'mir, 'tcx> {}
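
// This extension trait is implemented for the Miri evaluation context; it handles the
// intrinsics that the shared `emulate_intrinsic` machinery does not take care of itself.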
pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx> {
    fn call_intrinsic(
        &mut self,
        instance: ty::Instance<'tcx>,
        args: &[OpTy<'tcx, Tag>],
        dest: PlaceTy<'tcx, Tag>,
    ) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();
        if this.emulate_intrinsic(instance, args, dest)? {
            return Ok(());
        }
        let tcx = &{this.tcx.tcx};
        let substs = instance.substs;

        // All these intrinsics take raw pointers, so if we access memory directly
        // (as opposed to through a place), we have to remember to erase any tag
        // that might still hang around!

        let intrinsic_name = &*tcx.item_name(instance.def_id()).as_str();
        match intrinsic_name {
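            // `arith_offset` corresponds to `<*const T>::wrapping_offset`: the offset may wrap
            // around, so there is no inbounds check, only wrapping pointer arithmetic.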
            "arith_offset" => {
                let offset = this.read_scalar(args[1])?.to_isize(this)?;
                let ptr = this.read_scalar(args[0])?.not_undef()?;

                let pointee_ty = substs.type_at(0);
                let pointee_size = this.layout_of(pointee_ty)?.size.bytes() as i64;
                let offset = offset.overflowing_mul(pointee_size).0;
                let result_ptr = ptr.ptr_wrapping_signed_offset(offset, this);
                this.write_scalar(result_ptr, dest)?;
            }

            "assume" => {
                let cond = this.read_scalar(args[0])?.to_bool()?;
                if !cond {
                    throw_ub_format!("`assume` intrinsic called with `false`");
                }
            }
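
            // Volatile accesses: Miri executes single-threaded, so these are ordinary loads/stores.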
            "volatile_load" => {
                let place = this.deref_operand(args[0])?;
                this.copy_op(place.into(), dest)?;
            }
            "volatile_store" => {
                let place = this.deref_operand(args[0])?;
                this.copy_op(args[1], place.into())?;
            }
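
            // Atomic operations: since execution is single-threaded, each of these reduces to the
            // plain operation plus a check that the location is aligned to the atomic's size.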
62 "atomic_load_relaxed" |
63 "atomic_load_acq" => {
64 let place = this.deref_operand(args[0])?;
65 let val = this.read_scalar(place.into())?; // make sure it fits into a scalar; otherwise it cannot be atomic
67 // Check alignment requirements. Atomics must always be aligned to their size,
68 // even if the type they wrap would be less aligned (e.g. AtomicU64 on 32bit must
70 let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
71 this.memory.check_ptr_access(place.ptr, place.layout.size, align)?;
73 this.write_scalar(val, dest)?;
77 "atomic_store_relaxed" |
78 "atomic_store_rel" => {
79 let place = this.deref_operand(args[0])?;
80 let val = this.read_scalar(args[1])?; // make sure it fits into a scalar; otherwise it cannot be atomic
82 // Check alignment requirements. Atomics must always be aligned to their size,
83 // even if the type they wrap would be less aligned (e.g. AtomicU64 on 32bit must
85 let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
86 this.memory.check_ptr_access(place.ptr, place.layout.size, align)?;
88 this.write_scalar(val, place.into())?;
93 "atomic_fence_acqrel" |
95 // we are inherently singlethreaded and singlecored, this is a nop

            _ if intrinsic_name.starts_with("atomic_xchg") => {
                let place = this.deref_operand(args[0])?;
                let new = this.read_scalar(args[1])?;
                let old = this.read_scalar(place.into())?;

                // Check alignment requirements. Atomics must always be aligned to their size,
                // even if the type they wrap would be less aligned (e.g. AtomicU64 on 32bit must
                // be 8-aligned).
                let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
                this.memory.check_ptr_access(place.ptr, place.layout.size, align)?;

                this.write_scalar(old, dest)?; // old value is returned
                this.write_scalar(new, place.into())?;
            }
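
            // Compare-and-exchange returns a pair of the old value and a success flag.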
            _ if intrinsic_name.starts_with("atomic_cxchg") => {
                let place = this.deref_operand(args[0])?;
                let expect_old = this.read_immediate(args[1])?; // read as immediate for the sake of `binary_op()`
                let new = this.read_scalar(args[2])?;
                let old = this.read_immediate(place.into())?; // read as immediate for the sake of `binary_op()`

                // Check alignment requirements. Atomics must always be aligned to their size,
                // even if the type they wrap would be less aligned (e.g. AtomicU64 on 32bit must
                // be 8-aligned).
                let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
                this.memory.check_ptr_access(place.ptr, place.layout.size, align)?;

                // binary_op will bail if either of them is not a scalar
                let eq = this.overflowing_binary_op(mir::BinOp::Eq, old, expect_old)?.0;
                let res = Immediate::ScalarPair(old.to_scalar_or_undef(), eq.into());
                this.write_immediate(res, dest)?; // old value is returned
                // update ptr depending on comparison
                if eq.to_bool()? {
                    this.write_scalar(new, place.into())?;
                }
            }
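
            // Atomic read-modify-write operations (fetch_or/xor/and/nand/add/sub), in their
            // various memory orderings; the ordering is irrelevant for single-threaded Miri.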
139 "atomic_or_relaxed" |
143 "atomic_xor_acqrel" |
144 "atomic_xor_relaxed" |
148 "atomic_and_acqrel" |
149 "atomic_and_relaxed" |
153 "atomic_nand_acqrel" |
154 "atomic_nand_relaxed" |
158 "atomic_xadd_acqrel" |
159 "atomic_xadd_relaxed" |
163 "atomic_xsub_acqrel" |
164 "atomic_xsub_relaxed" => {
165 let place = this.deref_operand(args[0])?;
166 if !place.layout.ty.is_integral() {
167 bug!("Atomic arithmetic operations only work on integer types");
169 let rhs = this.read_immediate(args[1])?;
170 let old = this.read_immediate(place.into())?;
172 // Check alignment requirements. Atomics must always be aligned to their size,
173 // even if the type they wrap would be less aligned (e.g. AtomicU64 on 32bit must
175 let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
176 this.memory.check_ptr_access(place.ptr, place.layout.size, align)?;
178 this.write_immediate(*old, dest)?; // old value is returned
179 let (op, neg) = match intrinsic_name.split('_').nth(1).unwrap() {
180 "or" => (mir::BinOp::BitOr, false),
181 "xor" => (mir::BinOp::BitXor, false),
182 "and" => (mir::BinOp::BitAnd, false),
183 "xadd" => (mir::BinOp::Add, false),
184 "xsub" => (mir::BinOp::Sub, false),
185 "nand" => (mir::BinOp::BitAnd, true),
188 // Atomics wrap around on overflow.
189 let val = this.binary_op(op, old, rhs)?;
191 this.unary_op(mir::UnOp::Not, val)?
195 this.write_immediate(*val, place.into())?;
198 "breakpoint" => unimplemented!(), // halt miri
201 "copy_nonoverlapping" => {
202 let elem_ty = substs.type_at(0);
203 let elem_layout = this.layout_of(elem_ty)?;
204 let elem_size = elem_layout.size.bytes();
205 let count = this.read_scalar(args[2])?.to_usize(this)?;
206 let elem_align = elem_layout.align.abi;
208 let size = Size::from_bytes(count * elem_size);
209 let src = this.read_scalar(args[0])?.not_undef()?;
210 let src = this.memory.check_ptr_access(src, size, elem_align)?;
211 let dest = this.read_scalar(args[1])?.not_undef()?;
212 let dest = this.memory.check_ptr_access(dest, size, elem_align)?;
214 if let (Some(src), Some(dest)) = (src, dest) {
219 intrinsic_name.ends_with("_nonoverlapping"),
224 "discriminant_value" => {
225 let place = this.deref_operand(args[0])?;
226 let discr_val = this.read_discriminant(place.into())?.0;
227 this.write_scalar(Scalar::from_uint(discr_val, dest.layout.size), dest)?;
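
            // Unary f32 math intrinsics, evaluated on the host's floating point implementation
            // (hence the FIXME below).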
230 "sinf32" | "fabsf32" | "cosf32" | "sqrtf32" | "expf32" | "exp2f32" | "logf32" |
231 "log10f32" | "log2f32" | "floorf32" | "ceilf32" | "truncf32" | "roundf32" => {
232 // FIXME: Using host floats.
233 let f = f32::from_bits(this.read_scalar(args[0])?.to_u32()?);
234 let f = match intrinsic_name {
236 "fabsf32" => f.abs(),
238 "sqrtf32" => f.sqrt(),
240 "exp2f32" => f.exp2(),
242 "log10f32" => f.log10(),
243 "log2f32" => f.log2(),
244 "floorf32" => f.floor(),
245 "ceilf32" => f.ceil(),
246 "truncf32" => f.trunc(),
247 "roundf32" => f.round(),
250 this.write_scalar(Scalar::from_u32(f.to_bits()), dest)?;
253 "sinf64" | "fabsf64" | "cosf64" | "sqrtf64" | "expf64" | "exp2f64" | "logf64" |
254 "log10f64" | "log2f64" | "floorf64" | "ceilf64" | "truncf64" | "roundf64" => {
255 // FIXME: Using host floats.
256 let f = f64::from_bits(this.read_scalar(args[0])?.to_u64()?);
257 let f = match intrinsic_name {
259 "fabsf64" => f.abs(),
261 "sqrtf64" => f.sqrt(),
263 "exp2f64" => f.exp2(),
265 "log10f64" => f.log10(),
266 "log2f64" => f.log2(),
267 "floorf64" => f.floor(),
268 "ceilf64" => f.ceil(),
269 "truncf64" => f.trunc(),
270 "roundf64" => f.round(),
273 this.write_scalar(Scalar::from_u64(f.to_bits()), dest)?;
276 "fadd_fast" | "fsub_fast" | "fmul_fast" | "fdiv_fast" | "frem_fast" => {
277 let a = this.read_immediate(args[0])?;
278 let b = this.read_immediate(args[1])?;
279 let op = match intrinsic_name {
280 "fadd_fast" => mir::BinOp::Add,
281 "fsub_fast" => mir::BinOp::Sub,
282 "fmul_fast" => mir::BinOp::Mul,
283 "fdiv_fast" => mir::BinOp::Div,
284 "frem_fast" => mir::BinOp::Rem,
287 this.binop_ignore_overflow(op, a, b, dest)?;
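
            // min/max are done with rustc_apfloat (softfloat) rather than host floats.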
290 "minnumf32" | "maxnumf32" => {
291 let a = this.read_scalar(args[0])?.to_f32()?;
292 let b = this.read_scalar(args[1])?.to_f32()?;
293 let res = if intrinsic_name.starts_with("min") {
298 this.write_scalar(Scalar::from_f32(res), dest)?;
301 "minnumf64" | "maxnumf64" => {
302 let a = this.read_scalar(args[0])?.to_f64()?;
303 let b = this.read_scalar(args[1])?.to_f64()?;
304 let res = if intrinsic_name.starts_with("min") {
309 this.write_scalar(Scalar::from_f64(res), dest)?;

            "exact_div" => {
                // Performs an exact division, resulting in undefined behavior where
                // `x % y != 0` or `y == 0` or `x == T::min_value() && y == -1`
                let a = this.read_immediate(args[0])?;
                let b = this.read_immediate(args[1])?;

                if this.overflowing_binary_op(mir::BinOp::Rem, a, b)?.0.to_bits(dest.layout.size)? != 0 {
                    // Check if `b` is -1, which is the "min_value / -1" case.
                    let minus1 = Scalar::from_int(-1, dest.layout.size);
                    return Err(if b.to_scalar().unwrap() == minus1 {
                        err_ub_format!("exact_div: result of dividing MIN by -1 cannot be represented")
                    } else {
                        err_ub_format!("exact_div: {:?} cannot be divided by {:?} without remainder", *a, *b)
                    }.into());
                }
                this.binop_ignore_overflow(mir::BinOp::Div, a, b, dest)?;
            }
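
            // Branch-prediction hints have no observable effect; they just forward their argument.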
332 "likely" | "unlikely" => {
333 // These just return their argument
334 let b = this.read_immediate(args[0])?;
335 this.write_immediate(*b, dest)?;
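
            // Zero-initialize the destination (this is what `mem::zeroed` bottoms out to).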
            "init" => {
                // Check fast path: we don't want to force an allocation in case the destination is a simple value,
                // but we also do not want to create a new allocation with 0s and then copy that over.
                // FIXME: We do not properly validate in case of ZSTs and when doing it in memory!
                // However, this only affects direct calls of the intrinsic; calls to the stable
                // functions wrapping them do get their validation.
                // FIXME: should we check that the destination pointer is aligned even for ZSTs?
                if !dest.layout.is_zst() {
                    match dest.layout.abi {
                        layout::Abi::Scalar(ref s) => {
                            let x = Scalar::from_int(0, s.value.size(this));
                            this.write_scalar(x, dest)?;
                        }
                        layout::Abi::ScalarPair(ref s1, ref s2) => {
                            let x = Scalar::from_int(0, s1.value.size(this));
                            let y = Scalar::from_int(0, s2.value.size(this));
                            this.write_immediate(Immediate::ScalarPair(x.into(), y.into()), dest)?;
                        }
                        _ => {
                            // Do it in memory
                            let mplace = this.force_allocation(dest)?;
                            assert!(mplace.meta.is_none());
                            // not a zst, must be valid pointer
                            let ptr = mplace.ptr.to_ptr()?;
                            this.memory.get_mut(ptr.alloc_id)?.write_repeat(tcx, ptr, 0, dest.layout.size)?;
                        }
                    }
                }
            }

            "pref_align_of" => {
                let ty = substs.type_at(0);
                let layout = this.layout_of(ty)?;
                let align = layout.align.pref.bytes();
                let ptr_size = this.pointer_size();
                let align_val = Scalar::from_uint(align as u128, ptr_size);
                this.write_scalar(align_val, dest)?;
            }

            "move_val_init" => {
                let place = this.deref_operand(args[0])?;
                this.copy_op(args[1], place.into())?;
            }
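
            // Unlike `arith_offset` above, `offset` requires the result to stay within the same
            // allocation; `pointer_offset_inbounds` performs that check.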
            "offset" => {
                let offset = this.read_scalar(args[1])?.to_isize(this)?;
                let ptr = this.read_scalar(args[0])?.not_undef()?;
                let result_ptr = this.pointer_offset_inbounds(ptr, substs.type_at(0), offset)?;
                this.write_scalar(result_ptr, dest)?;
            }
389 "panic_if_uninhabited" => {
390 let ty = substs.type_at(0);
391 let layout = this.layout_of(ty)?;
392 if layout.abi.is_uninhabited() {
393 throw_ub_format!("Trying to instantiate uninhabited type {}", ty)

            "powf32" => {
                // FIXME: Using host floats.
                let f = f32::from_bits(this.read_scalar(args[0])?.to_u32()?);
                let f2 = f32::from_bits(this.read_scalar(args[1])?.to_u32()?);
                this.write_scalar(Scalar::from_u32(f.powf(f2).to_bits()), dest)?;
            }

            "powf64" => {
                // FIXME: Using host floats.
                let f = f64::from_bits(this.read_scalar(args[0])?.to_u64()?);
                let f2 = f64::from_bits(this.read_scalar(args[1])?.to_u64()?);
                this.write_scalar(Scalar::from_u64(f.powf(f2).to_bits()), dest)?;
            }

            "fmaf32" => {
                let a = this.read_scalar(args[0])?.to_f32()?;
                let b = this.read_scalar(args[1])?.to_f32()?;
                let c = this.read_scalar(args[2])?.to_f32()?;
                let res = a.mul_add(b, c).value;
                this.write_scalar(Scalar::from_f32(res), dest)?;
            }

            "fmaf64" => {
                let a = this.read_scalar(args[0])?.to_f64()?;
                let b = this.read_scalar(args[1])?.to_f64()?;
                let c = this.read_scalar(args[2])?.to_f64()?;
                let res = a.mul_add(b, c).value;
                this.write_scalar(Scalar::from_f64(res), dest)?;
            }

            "powif32" => {
                // FIXME: Using host floats.
                let f = f32::from_bits(this.read_scalar(args[0])?.to_u32()?);
                let i = this.read_scalar(args[1])?.to_i32()?;
                this.write_scalar(Scalar::from_u32(f.powi(i).to_bits()), dest)?;
            }

            "powif64" => {
                // FIXME: Using host floats.
                let f = f64::from_bits(this.read_scalar(args[0])?.to_u64()?);
                let i = this.read_scalar(args[1])?.to_i32()?;
                this.write_scalar(Scalar::from_u64(f.powi(i).to_bits()), dest)?;
            }
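
            // For (potentially) dynamically sized values, size and alignment are computed from the
            // place, including its metadata; extern types have no known size, hence the `expect`.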
            "size_of_val" => {
                let mplace = this.deref_operand(args[0])?;
                let (size, _) = this.size_and_align_of_mplace(mplace)?
                    .expect("size_of_val called on extern type");
                let ptr_size = this.pointer_size();
                this.write_scalar(Scalar::from_uint(size.bytes() as u128, ptr_size), dest)?;
            }

            "min_align_of_val" |
            "align_of_val" => {
                let mplace = this.deref_operand(args[0])?;
                let (_, align) = this.size_and_align_of_mplace(mplace)?
                    .expect("align_of_val called on extern type");
                let ptr_size = this.pointer_size();
                this.write_scalar(Scalar::from_uint(align.bytes(), ptr_size), dest)?;
            }

            "unchecked_div" => {
                let l = this.read_immediate(args[0])?;
                let r = this.read_immediate(args[1])?;
                let rval = r.to_scalar()?.to_bits(args[1].layout.size)?;
                if rval == 0 {
                    throw_ub_format!("Division by 0 in unchecked_div");
                }
                this.binop_ignore_overflow(mir::BinOp::Div, l, r, dest)?;
            }

            "unchecked_rem" => {
                let l = this.read_immediate(args[0])?;
                let r = this.read_immediate(args[1])?;
                let rval = r.to_scalar()?.to_bits(args[1].layout.size)?;
                if rval == 0 {
                    throw_ub_format!("Division by 0 in unchecked_rem");
                }
                this.binop_ignore_overflow(mir::BinOp::Rem, l, r, dest)?;
            }

            "unchecked_add" | "unchecked_sub" | "unchecked_mul" => {
                let l = this.read_immediate(args[0])?;
                let r = this.read_immediate(args[1])?;
                let op = match intrinsic_name {
                    "unchecked_add" => mir::BinOp::Add,
                    "unchecked_sub" => mir::BinOp::Sub,
                    "unchecked_mul" => mir::BinOp::Mul,
                    _ => bug!(),
                };
                let (res, overflowed, _ty) = this.overflowing_binary_op(op, l, r)?;
                if overflowed {
                    throw_ub_format!("Overflowing arithmetic in {}", intrinsic_name);
                }
                this.write_scalar(res, dest)?;
            }
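
            // Mark the destination as uninitialized; the mirror image of the `init` case above.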
            "uninit" => {
                // Check fast path: we don't want to force an allocation in case the destination is a simple value,
                // but we also do not want to create a new allocation with 0s and then copy that over.
                // FIXME: We do not properly validate in case of ZSTs and when doing it in memory!
                // However, this only affects direct calls of the intrinsic; calls to the stable
                // functions wrapping them do get their validation.
                // FIXME: should we check alignment for ZSTs?
                use crate::ScalarMaybeUndef;
                if !dest.layout.is_zst() {
                    match dest.layout.abi {
                        layout::Abi::Scalar(..) => {
                            let x = ScalarMaybeUndef::Undef;
                            this.write_immediate(Immediate::Scalar(x), dest)?;
                        }
                        layout::Abi::ScalarPair(..) => {
                            let x = ScalarMaybeUndef::Undef;
                            this.write_immediate(Immediate::ScalarPair(x, x), dest)?;
                        }
                        _ => {
                            // Do it in memory
                            let mplace = this.force_allocation(dest)?;
                            assert!(mplace.meta.is_none());
                            let ptr = mplace.ptr.to_ptr()?;
                            this.memory
                                .get_mut(ptr.alloc_id)?
                                .mark_definedness(ptr, dest.layout.size, false);
                        }
                    }
                }
            }

            "write_bytes" => {
                let ty = substs.type_at(0);
                let ty_layout = this.layout_of(ty)?;
                let val_byte = this.read_scalar(args[1])?.to_u8()?;
                let ptr = this.read_scalar(args[0])?.not_undef()?;
                let count = this.read_scalar(args[2])?.to_usize(this)?;
                let byte_count = ty_layout.size * count;
                match this.memory.check_ptr_access(ptr, byte_count, ty_layout.align.abi)? {
                    Some(ptr) => {
                        this.memory
                            .get_mut(ptr.alloc_id)?
                            .write_repeat(tcx, ptr, val_byte, byte_count)?;
                    }
                    None => {
                        // Size is 0, nothing to do.
                    }
                }
            }
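
            // Everything else is unsupported: report it instead of silently doing the wrong thing.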
            name => throw_unsup_format!("unimplemented intrinsic: {}", name),
        }

        Ok(())
    }
}