use rustc::mir::interpret::{InterpResult, PointerArithmetic};
use rustc::ty::layout::{self, Align, LayoutOf, Size};
use rustc_apfloat::Float;
use syntax::source_map::Span;

impl<'mir, 'tcx> EvalContextExt<'mir, 'tcx> for crate::MiriEvalContext<'mir, 'tcx> {}
pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx> {
    fn call_intrinsic(
        &mut self,
        span: Span,
        instance: ty::Instance<'tcx>,
        args: &[OpTy<'tcx, Tag>],
        ret: Option<(PlaceTy<'tcx, Tag>, mir::BasicBlock)>,
        unwind: Option<mir::BasicBlock>,
    ) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();
        if this.emulate_intrinsic(span, instance, args, ret)? {
            return Ok(());
        }
        let tcx = &{ this.tcx.tcx };
        let substs = instance.substs;

        // All these intrinsics take raw pointers, so if we access memory directly
        // (as opposed to through a place), we have to remember to erase any tag
        // that might still hang around!
        let intrinsic_name = &*tcx.item_name(instance.def_id()).as_str();

        // Handle diverging intrinsics.
        let (dest, ret) = match intrinsic_name {
                throw_machine_stop!(TerminationInfo::Abort);
            "miri_start_panic" => return this.handle_miri_start_panic(args, unwind),
                if let Some(p) = ret {
                    p
                } else {
                    throw_unsup_format!("unimplemented (diverging) intrinsic: {}", intrinsic_name);
                }
        };

        match intrinsic_name {
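            // Wrapping pointer arithmetic: scale the signed offset by the pointee size
            // (wrapping on overflow) and offset the pointer, with no inbounds requirement.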
                let offset = this.read_scalar(args[1])?.to_machine_isize(this)?;
                let ptr = this.read_scalar(args[0])?.not_undef()?;

                let pointee_ty = substs.type_at(0);
                let pointee_size = this.layout_of(pointee_ty)?.size.bytes() as i64;
                let offset = offset.overflowing_mul(pointee_size).0;
                let result_ptr = ptr.ptr_wrapping_signed_offset(offset, this);
                this.write_scalar(result_ptr, dest)?;
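            // `assume`: the caller promises that the condition holds, so passing `false`
            // is undefined behavior.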
                let cond = this.read_scalar(args[0])?.to_bool()?;
                if !cond {
                    throw_ub_format!("`assume` intrinsic called with `false`");
                }
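            // Load the value behind the pointer given as the first argument into the
            // return place.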
                let place = this.deref_operand(args[0])?;
                this.copy_op(place.into(), dest)?;
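            // Store the value given as the second argument into the place behind the
            // pointer given as the first argument.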
                let place = this.deref_operand(args[0])?;
                this.copy_op(args[1], place.into())?;
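            // Atomic loads: the interpreted program is single-threaded, so an ordinary
            // read suffices, but the value must fit in a scalar and the pointer must be
            // aligned to the size of the access.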
            | "atomic_load_relaxed"
                let place = this.deref_operand(args[0])?;
                let val = this.read_scalar(place.into())?; // make sure it fits into a scalar; otherwise it cannot be atomic

                // Check alignment requirements. Atomics must always be aligned to their size,
                // even if the type they wrap would be less aligned (e.g. AtomicU64 on 32bit must
                // be aligned to 8 bytes).
                let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
                this.memory.check_ptr_access(place.ptr, place.layout.size, align)?;

                this.write_scalar(val, dest)?;

            | "atomic_store_relaxed"
                let place = this.deref_operand(args[0])?;
                let val = this.read_scalar(args[1])?; // make sure it fits into a scalar; otherwise it cannot be atomic

                // Check alignment requirements. Atomics must always be aligned to their size,
                // even if the type they wrap would be less aligned (e.g. AtomicU64 on 32bit must
                // be aligned to 8 bytes).
                let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
                this.memory.check_ptr_access(place.ptr, place.layout.size, align)?;

                this.write_scalar(val, place.into())?;

            | "atomic_fence_acqrel"
                // We are inherently single-threaded and single-cored; this is a no-op.
            _ if intrinsic_name.starts_with("atomic_xchg") => {
                let place = this.deref_operand(args[0])?;
                let new = this.read_scalar(args[1])?;
                let old = this.read_scalar(place.into())?;

                // Check alignment requirements. Atomics must always be aligned to their size,
                // even if the type they wrap would be less aligned (e.g. AtomicU64 on 32bit must
                // be aligned to 8 bytes).
                let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
                this.memory.check_ptr_access(place.ptr, place.layout.size, align)?;

                this.write_scalar(old, dest)?; // old value is returned
                this.write_scalar(new, place.into())?;
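            // Compare-and-exchange: the destination receives a pair of the old value and
            // a flag saying whether it matched the expected value; the pointed-to place is
            // updated depending on the outcome of that comparison.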
            _ if intrinsic_name.starts_with("atomic_cxchg") => {
                let place = this.deref_operand(args[0])?;
                let expect_old = this.read_immediate(args[1])?; // read as immediate for the sake of `binary_op()`
                let new = this.read_scalar(args[2])?;
                let old = this.read_immediate(place.into())?; // read as immediate for the sake of `binary_op()`

                // Check alignment requirements. Atomics must always be aligned to their size,
                // even if the type they wrap would be less aligned (e.g. AtomicU64 on 32bit must
                // be aligned to 8 bytes).
                let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
                this.memory.check_ptr_access(place.ptr, place.layout.size, align)?;

                // `binary_op` will bail if either of them is not a scalar.
                let eq = this.overflowing_binary_op(mir::BinOp::Eq, old, expect_old)?.0;
                let res = Immediate::ScalarPair(old.to_scalar_or_undef(), eq.into());

                this.write_immediate(res, dest)?;
                // Update ptr depending on comparison.
                this.write_scalar(new, place.into())?;
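            // Read-modify-write atomics: fetch the old value, combine it with the operand
            // using the operation encoded in the intrinsic name, store the result back,
            // and return the old value.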
            | "atomic_or_relaxed"
            | "atomic_xor_acqrel"
            | "atomic_xor_relaxed"
            | "atomic_and_acqrel"
            | "atomic_and_relaxed"
            | "atomic_nand_acqrel"
            | "atomic_nand_relaxed"
            | "atomic_xadd_acqrel"
            | "atomic_xadd_relaxed"
            | "atomic_xsub_acqrel"
            | "atomic_xsub_relaxed"
                let place = this.deref_operand(args[0])?;
                if !place.layout.ty.is_integral() {
                    bug!("Atomic arithmetic operations only work on integer types");
                }
                let rhs = this.read_immediate(args[1])?;
                let old = this.read_immediate(place.into())?;

                // Check alignment requirements. Atomics must always be aligned to their size,
                // even if the type they wrap would be less aligned (e.g. AtomicU64 on 32bit must
                // be aligned to 8 bytes).
                let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
                this.memory.check_ptr_access(place.ptr, place.layout.size, align)?;

                this.write_immediate(*old, dest)?; // old value is returned
                let (op, neg) = match intrinsic_name.split('_').nth(1).unwrap() {
                    "or" => (mir::BinOp::BitOr, false),
                    "xor" => (mir::BinOp::BitXor, false),
                    "and" => (mir::BinOp::BitAnd, false),
                    "xadd" => (mir::BinOp::Add, false),
                    "xsub" => (mir::BinOp::Sub, false),
                    "nand" => (mir::BinOp::BitAnd, true),

                // Atomics wrap around on overflow.
                let val = this.binary_op(op, old, rhs)?;
                let val = if neg { this.unary_op(mir::UnOp::Not, val)? } else { val };
                this.write_immediate(*val, place.into())?;

            "breakpoint" => unimplemented!(), // halt miri
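            // `copy`/`copy_nonoverlapping`: copy `count` elements of the given type from
            // the first pointer to the second, checking that both ranges are in-bounds
            // and sufficiently aligned.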
            | "copy_nonoverlapping"
                let elem_ty = substs.type_at(0);
                let elem_layout = this.layout_of(elem_ty)?;
                let elem_size = elem_layout.size.bytes();
                let count = this.read_scalar(args[2])?.to_machine_usize(this)?;
                let elem_align = elem_layout.align.abi;

                let size = Size::from_bytes(count * elem_size);
                let src = this.read_scalar(args[0])?.not_undef()?;
                let src = this.memory.check_ptr_access(src, size, elem_align)?;
                let dest = this.read_scalar(args[1])?.not_undef()?;
                let dest = this.memory.check_ptr_access(dest, size, elem_align)?;

                if let (Some(src), Some(dest)) = (src, dest) {
                    this.memory.copy(
                        src,
                        dest,
                        size,
                        intrinsic_name.ends_with("_nonoverlapping"),
                    )?;
                }
            "discriminant_value" => {
                let place = this.deref_operand(args[0])?;
                let discr_val = this.read_discriminant(place.into())?.0;
                this.write_scalar(Scalar::from_uint(discr_val, dest.layout.size), dest)?;

                // FIXME: Using host floats.
                let f = f32::from_bits(this.read_scalar(args[0])?.to_u32()?);
                let f = match intrinsic_name {
                    "fabsf32" => f.abs(),
                    "sqrtf32" => f.sqrt(),
                    "exp2f32" => f.exp2(),
                    "log10f32" => f.log10(),
                    "log2f32" => f.log2(),
                    "floorf32" => f.floor(),
                    "ceilf32" => f.ceil(),
                    "truncf32" => f.trunc(),
                    "roundf32" => f.round(),
                this.write_scalar(Scalar::from_u32(f.to_bits()), dest)?;

                // FIXME: Using host floats.
                let f = f64::from_bits(this.read_scalar(args[0])?.to_u64()?);
                let f = match intrinsic_name {
                    "fabsf64" => f.abs(),
                    "sqrtf64" => f.sqrt(),
                    "exp2f64" => f.exp2(),
                    "log10f64" => f.log10(),
                    "log2f64" => f.log2(),
                    "floorf64" => f.floor(),
                    "ceilf64" => f.ceil(),
                    "truncf64" => f.trunc(),
                    "roundf64" => f.round(),
                this.write_scalar(Scalar::from_u64(f.to_bits()), dest)?;
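            // "Fast" float arithmetic (fadd_fast etc.): Miri simply evaluates these like
            // the corresponding ordinary float operations.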
                let a = this.read_immediate(args[0])?;
                let b = this.read_immediate(args[1])?;
                let op = match intrinsic_name {
                    "fadd_fast" => mir::BinOp::Add,
                    "fsub_fast" => mir::BinOp::Sub,
                    "fmul_fast" => mir::BinOp::Mul,
                    "fdiv_fast" => mir::BinOp::Div,
                    "frem_fast" => mir::BinOp::Rem,
                this.binop_ignore_overflow(op, a, b, dest)?;
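            // min/max/copysign, computed with `rustc_apfloat` soft floats rather than
            // host floats.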
                let a = this.read_scalar(args[0])?.to_f32()?;
                let b = this.read_scalar(args[1])?.to_f32()?;
                let res = match intrinsic_name {
                    "minnumf32" => a.min(b),
                    "maxnumf32" => a.max(b),
                    "copysignf32" => a.copy_sign(b),
                this.write_scalar(Scalar::from_f32(res), dest)?;

                let a = this.read_scalar(args[0])?.to_f64()?;
                let b = this.read_scalar(args[1])?.to_f64()?;
                let res = match intrinsic_name {
                    "minnumf64" => a.min(b),
                    "maxnumf64" => a.max(b),
                    "copysignf64" => a.copy_sign(b),
                this.write_scalar(Scalar::from_f64(res), dest)?;

                this.exact_div(this.read_immediate(args[0])?, this.read_immediate(args[1])?, dest)?,

                // These just return their argument
                let b = this.read_immediate(args[0])?;
                this.write_immediate(*b, dest)?;

                // Check fast path: we don't want to force an allocation in case the destination is a simple value,
                // but we also do not want to create a new allocation with 0s and then copy that over.
                // FIXME: We do not properly validate in case of ZSTs and when doing it in memory!
                // However, this only affects direct calls of the intrinsic; calls to the stable
                // functions wrapping them do get their validation.
                // FIXME: should we check that the destination pointer is aligned even for ZSTs?
                if !dest.layout.is_zst() {
                    match dest.layout.abi {
                        layout::Abi::Scalar(ref s) => {
                            let x = Scalar::from_int(0, s.value.size(this));
                            this.write_scalar(x, dest)?;
                        }
                        layout::Abi::ScalarPair(ref s1, ref s2) => {
                            let x = Scalar::from_int(0, s1.value.size(this));
                            let y = Scalar::from_int(0, s2.value.size(this));
                            this.write_immediate(Immediate::ScalarPair(x.into(), y.into()), dest)?;
                        }
                        _ => {
                            let mplace = this.force_allocation(dest)?;
                            mplace.meta.unwrap_none(); // must be sized
                            this.memory.write_bytes(
                                mplace.ptr,
                                iter::repeat(0u8).take(dest.layout.size.bytes() as usize),
                            )?;
                let ty = substs.type_at(0);
                let layout = this.layout_of(ty)?;
                let align = layout.align.pref.bytes();
                let ptr_size = this.pointer_size();
                let align_val = Scalar::from_uint(align as u128, ptr_size);
                this.write_scalar(align_val, dest)?;

                let place = this.deref_operand(args[0])?;
                this.copy_op(args[1], place.into())?;

                let offset = this.read_scalar(args[1])?.to_machine_isize(this)?;
                let ptr = this.read_scalar(args[0])?.not_undef()?;
                let result_ptr = this.pointer_offset_inbounds(ptr, substs.type_at(0), offset)?;
                this.write_scalar(result_ptr, dest)?;

            "panic_if_uninhabited" => {
                let ty = substs.type_at(0);
                let layout = this.layout_of(ty)?;
                if layout.abi.is_uninhabited() {
                    // FIXME: This should throw a panic in the interpreted program instead.
                    throw_unsup_format!("Trying to instantiate uninhabited type {}", ty)

                // FIXME: Using host floats.
                let f = f32::from_bits(this.read_scalar(args[0])?.to_u32()?);
                let f2 = f32::from_bits(this.read_scalar(args[1])?.to_u32()?);
                this.write_scalar(Scalar::from_u32(f.powf(f2).to_bits()), dest)?;

                // FIXME: Using host floats.
                let f = f64::from_bits(this.read_scalar(args[0])?.to_u64()?);
                let f2 = f64::from_bits(this.read_scalar(args[1])?.to_u64()?);
                this.write_scalar(Scalar::from_u64(f.powf(f2).to_bits()), dest)?;
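            // Fused multiply-add, computed with `rustc_apfloat`'s `mul_add` instead of
            // host floats.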
                let a = this.read_scalar(args[0])?.to_f32()?;
                let b = this.read_scalar(args[1])?.to_f32()?;
                let c = this.read_scalar(args[2])?.to_f32()?;
                let res = a.mul_add(b, c).value;
                this.write_scalar(Scalar::from_f32(res), dest)?;

                let a = this.read_scalar(args[0])?.to_f64()?;
                let b = this.read_scalar(args[1])?.to_f64()?;
                let c = this.read_scalar(args[2])?.to_f64()?;
                let res = a.mul_add(b, c).value;
                this.write_scalar(Scalar::from_f64(res), dest)?;

                // FIXME: Using host floats.
                let f = f32::from_bits(this.read_scalar(args[0])?.to_u32()?);
                let i = this.read_scalar(args[1])?.to_i32()?;
                this.write_scalar(Scalar::from_u32(f.powi(i).to_bits()), dest)?;

                // FIXME: Using host floats.
                let f = f64::from_bits(this.read_scalar(args[0])?.to_u64()?);
                let i = this.read_scalar(args[1])?.to_i32()?;
                this.write_scalar(Scalar::from_u64(f.powi(i).to_bits()), dest)?;
                let mplace = this.deref_operand(args[0])?;
                let (size, _) = this
                    .size_and_align_of_mplace(mplace)?
                    .expect("size_of_val called on extern type");
                let ptr_size = this.pointer_size();
                this.write_scalar(Scalar::from_uint(size.bytes() as u128, ptr_size), dest)?;

                let mplace = this.deref_operand(args[0])?;
                let (_, align) = this
                    .size_and_align_of_mplace(mplace)?
                    .expect("size_of_val called on extern type");
                let ptr_size = this.pointer_size();
                this.write_scalar(Scalar::from_uint(align.bytes(), ptr_size), dest)?;
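            // `unchecked_div`/`unchecked_rem`: the caller promises the divisor is
            // non-zero, so dividing by zero here is undefined behavior.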
                let l = this.read_immediate(args[0])?;
                let r = this.read_immediate(args[1])?;
                let rval = r.to_scalar()?.to_bits(args[1].layout.size)?;
                if rval == 0 {
                    throw_ub_format!("Division by 0 in unchecked_div");
                }
                this.binop_ignore_overflow(mir::BinOp::Div, l, r, dest)?;

                let l = this.read_immediate(args[0])?;
                let r = this.read_immediate(args[1])?;
                let rval = r.to_scalar()?.to_bits(args[1].layout.size)?;
                if rval == 0 {
                    throw_ub_format!("Division by 0 in unchecked_rem");
                }
                this.binop_ignore_overflow(mir::BinOp::Rem, l, r, dest)?;
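            // Unchecked arithmetic: the caller promises the operation does not overflow,
            // so an overflow here is undefined behavior.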
                let l = this.read_immediate(args[0])?;
                let r = this.read_immediate(args[1])?;
                let op = match intrinsic_name {
                    "unchecked_add" => mir::BinOp::Add,
                    "unchecked_sub" => mir::BinOp::Sub,
                    "unchecked_mul" => mir::BinOp::Mul,

                let (res, overflowed, _ty) = this.overflowing_binary_op(op, l, r)?;
                if overflowed {
                    throw_ub_format!("Overflowing arithmetic in {}", intrinsic_name);
                }
                this.write_scalar(res, dest)?;
                // Check fast path: we don't want to force an allocation in case the destination is a simple value,
                // but we also do not want to create a new allocation with 0s and then copy that over.
                // FIXME: We do not properly validate in case of ZSTs and when doing it in memory!
                // However, this only affects direct calls of the intrinsic; calls to the stable
                // functions wrapping them do get their validation.
                // FIXME: should we check alignment for ZSTs?
                if !dest.layout.is_zst() {
                    match dest.layout.abi {
                        layout::Abi::Scalar(..) => {
                            let x = ScalarMaybeUndef::Undef;
                            this.write_immediate(Immediate::Scalar(x), dest)?;
                        }
                        layout::Abi::ScalarPair(..) => {
                            let x = ScalarMaybeUndef::Undef;
                            this.write_immediate(Immediate::ScalarPair(x, x), dest)?;
                        }
                        _ => {
                            let mplace = this.force_allocation(dest)?;
                            mplace.meta.unwrap_none();
                            let ptr = mplace.ptr.to_ptr()?;
                            // We know the return place is in-bounds
                            this.memory.get_raw_mut(ptr.alloc_id)?.mark_definedness(
                let ty = substs.type_at(0);
                let ty_layout = this.layout_of(ty)?;
                let val_byte = this.read_scalar(args[1])?.to_u8()?;
                let ptr = this.read_scalar(args[0])?.not_undef()?;
                let count = this.read_scalar(args[2])?.to_machine_usize(this)?;
                let byte_count = ty_layout.size * count;
                this.memory
                    .write_bytes(ptr, iter::repeat(val_byte).take(byte_count.bytes() as usize))?;

            name => throw_unsup_format!("unimplemented intrinsic: {}", name),
        }

        this.dump_place(*dest);
        this.go_to_block(ret);