3 use rustc_apfloat::Float;
5 use rustc::mir::interpret::{InterpResult, PointerArithmetic};
6 use rustc::ty::layout::{self, LayoutOf, Size, Align};
8 use syntax::source_map::Span;
11 PlaceTy, OpTy, Immediate, Scalar, Tag,
12 OperatorEvalContextExt
// NOTE(review): this file reads as a line-numbered excerpt of Miri's intrinsic
// shim (each line still carries its original line number, and many interior
// lines -- the `fn` header, match-arm labels, guards, closing braces -- are
// absent from this view). Code is left byte-identical below; comments only
// describe what the visible lines establish.
15 impl<'mir, 'tcx> EvalContextExt<'mir, 'tcx> for crate::MiriEvalContext<'mir, 'tcx> {}
16 pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx> {
// Trailing parameters of the intrinsic-call handler: the resolved instance,
// the argument operands, and the destination place. The `fn` line itself
// (and the `span` parameter it presumably declares -- see the
// `emulate_intrinsic(span, ...)` call below) is not visible in this excerpt.
20 instance: ty::Instance<'tcx>,
21 args: &[OpTy<'tcx, Tag>],
22 dest: PlaceTy<'tcx, Tag>,
23 ) -> InterpResult<'tcx> {
24 let this = self.eval_context_mut();
// First let the generic interpreter handle intrinsics it already knows;
// only fall through to the Miri-specific emulation below if it did not.
25 if this.emulate_intrinsic(span, instance, args, dest)? {
28 let tcx = &{this.tcx.tcx};
29 let substs = instance.substs;
31 // All these intrinsics take raw pointers, so if we access memory directly
32 // (as opposed to through a place), we have to remember to erase any tag
33 // that might still hang around!
// Dispatch on the intrinsic's symbol name. Most per-arm labels are missing
// from this excerpt; each region below is annotated with what the visible
// statements establish.
35 let intrinsic_name = &*tcx.item_name(instance.def_id()).as_str();
36 match intrinsic_name {
// Wrapping pointer offset, scaled by the pointee size -- presumably the
// `arith_offset` arm (label not visible; TODO confirm against full file).
38 let offset = this.read_scalar(args[1])?.to_machine_isize(this)?;
39 let ptr = this.read_scalar(args[0])?.not_undef()?;
41 let pointee_ty = substs.type_at(0);
42 let pointee_size = this.layout_of(pointee_ty)?.size.bytes() as i64;
// Both the size multiplication and the pointer offset deliberately wrap.
43 let offset = offset.overflowing_mul(pointee_size).0;
44 let result_ptr = ptr.ptr_wrapping_signed_offset(offset, this);
45 this.write_scalar(result_ptr, dest)?;
// `assume`: calling it with `false` is UB. NOTE(review): the `if !cond`
// guard line is not visible in this excerpt -- as shown, the throw would be
// unconditional; confirm against the full file.
49 let cond = this.read_scalar(args[0])?.to_bool()?;
51 throw_ub_format!("`assume` intrinsic called with `false`");
// Volatile load: copy the pointed-to value into the return place.
56 let place = this.deref_operand(args[0])?;
57 this.copy_op(place.into(), dest)?;
// Volatile store: copy the value operand through the pointer.
61 let place = this.deref_operand(args[0])?;
62 this.copy_op(args[1], place.into())?;
// Atomic loads: performed as plain reads (the interpreter is single-threaded
// -- see the fence comment below), plus an alignment check against the
// value's *size* rather than its type's ABI alignment.
66 "atomic_load_relaxed" |
67 "atomic_load_acq" => {
68 let place = this.deref_operand(args[0])?;
69 let val = this.read_scalar(place.into())?; // make sure it fits into a scalar; otherwise it cannot be atomic
71 // Check alignment requirements. Atomics must always be aligned to their size,
72 // even if the type they wrap would be less aligned (e.g. AtomicU64 on 32bit must
74 let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
75 this.memory.check_ptr_access(place.ptr, place.layout.size, align)?;
77 this.write_scalar(val, dest)?;
// Atomic stores: plain writes with the same size-based alignment check.
81 "atomic_store_relaxed" |
82 "atomic_store_rel" => {
83 let place = this.deref_operand(args[0])?;
84 let val = this.read_scalar(args[1])?; // make sure it fits into a scalar; otherwise it cannot be atomic
86 // Check alignment requirements. Atomics must always be aligned to their size,
87 // even if the type they wrap would be less aligned (e.g. AtomicU64 on 32bit must
89 let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
90 this.memory.check_ptr_access(place.ptr, place.layout.size, align)?;
92 this.write_scalar(val, place.into())?;
// Fences are no-ops in this single-threaded model.
97 "atomic_fence_acqrel" |
99 // we are inherently singlethreaded and singlecored, this is a nop
// Atomic exchange: return the previous value, store the new one.
102 _ if intrinsic_name.starts_with("atomic_xchg") => {
103 let place = this.deref_operand(args[0])?;
104 let new = this.read_scalar(args[1])?;
105 let old = this.read_scalar(place.into())?;
107 // Check alignment requirements. Atomics must always be aligned to their size,
108 // even if the type they wrap would be less aligned (e.g. AtomicU64 on 32bit must
110 let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
111 this.memory.check_ptr_access(place.ptr, place.layout.size, align)?;
113 this.write_scalar(old, dest)?; // old value is returned
114 this.write_scalar(new, place.into())?;
// Atomic compare-exchange: result is a (old value, success flag) scalar pair.
117 _ if intrinsic_name.starts_with("atomic_cxchg") => {
118 let place = this.deref_operand(args[0])?;
119 let expect_old = this.read_immediate(args[1])?; // read as immediate for the sake of `binary_op()`
120 let new = this.read_scalar(args[2])?;
121 let old = this.read_immediate(place.into())?; // read as immediate for the sake of `binary_op()`
123 // Check alignment requirements. Atomics must always be aligned to their size,
124 // even if the type they wrap would be less aligned (e.g. AtomicU64 on 32bit must
126 let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
127 this.memory.check_ptr_access(place.ptr, place.layout.size, align)?;
129 // binary_op will bail if either of them is not a scalar
130 let eq = this.overflowing_binary_op(mir::BinOp::Eq, old, expect_old)?.0;
131 let res = Immediate::ScalarPair(old.to_scalar_or_undef(), eq.into());
132 this.write_immediate(res, dest)?; // old value is returned
133 // update ptr depending on comparison
// NOTE(review): the `if eq.to_bool()?` guard presumably sits on a line
// missing from this excerpt -- as shown, the store looks unconditional;
// confirm against the full file.
135 this.write_scalar(new, place.into())?;
// Atomic read-modify-write family: or/xor/and/nand/xadd/xsub. Several of the
// `|`-joined arm labels are missing from this excerpt.
143 "atomic_or_relaxed" |
147 "atomic_xor_acqrel" |
148 "atomic_xor_relaxed" |
152 "atomic_and_acqrel" |
153 "atomic_and_relaxed" |
157 "atomic_nand_acqrel" |
158 "atomic_nand_relaxed" |
162 "atomic_xadd_acqrel" |
163 "atomic_xadd_relaxed" |
167 "atomic_xsub_acqrel" |
168 "atomic_xsub_relaxed" => {
169 let place = this.deref_operand(args[0])?;
// These intrinsics are only instantiated at integer types; anything else is
// a compiler bug, not user error.
170 if !place.layout.ty.is_integral() {
171 bug!("Atomic arithmetic operations only work on integer types");
173 let rhs = this.read_immediate(args[1])?;
174 let old = this.read_immediate(place.into())?;
176 // Check alignment requirements. Atomics must always be aligned to their size,
177 // even if the type they wrap would be less aligned (e.g. AtomicU64 on 32bit must
179 let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
180 this.memory.check_ptr_access(place.ptr, place.layout.size, align)?;
182 this.write_immediate(*old, dest)?; // old value is returned
// Map the middle name component of "atomic_<op>_<ordering>" to a MIR binop;
// `neg` marks nand, which needs a bitwise NOT applied after the AND.
183 let (op, neg) = match intrinsic_name.split('_').nth(1).unwrap() {
184 "or" => (mir::BinOp::BitOr, false),
185 "xor" => (mir::BinOp::BitXor, false),
186 "and" => (mir::BinOp::BitAnd, false),
187 "xadd" => (mir::BinOp::Add, false),
188 "xsub" => (mir::BinOp::Sub, false),
189 "nand" => (mir::BinOp::BitAnd, true),
192 // Atomics wrap around on overflow.
193 let val = this.binary_op(op, old, rhs)?;
// NOTE(review): the NOT is presumably applied only when `neg` is set; the
// surrounding `if neg` lines are not visible in this excerpt.
195 this.unary_op(mir::UnOp::Not, val)?
199 this.write_immediate(*val, place.into())?;
202 "breakpoint" => unimplemented!(), // halt miri
// Typed memory copy: the byte size is element-size * count, and both the
// source and destination pointers are access-checked up front.
205 "copy_nonoverlapping" => {
206 let elem_ty = substs.type_at(0);
207 let elem_layout = this.layout_of(elem_ty)?;
208 let elem_size = elem_layout.size.bytes();
209 let count = this.read_scalar(args[2])?.to_machine_usize(this)?;
210 let elem_align = elem_layout.align.abi;
// NOTE(review): `count * elem_size` is an unchecked u64 multiply here --
// worth confirming how the full file guards against overflow.
212 let size = Size::from_bytes(count * elem_size);
213 let src = this.read_scalar(args[0])?.not_undef()?;
214 let src = this.memory.check_ptr_access(src, size, elem_align)?;
215 let dest = this.read_scalar(args[1])?.not_undef()?;
216 let dest = this.memory.check_ptr_access(dest, size, elem_align)?;
// `check_ptr_access` returns `None` for zero-sized accesses: nothing to copy.
218 if let (Some(src), Some(dest)) = (src, dest) {
// Passed as the `nonoverlapping` flag of the (not-visible) copy call, so the
// same code presumably also serves a plain `copy` arm.
223 intrinsic_name.ends_with("_nonoverlapping"),
// Read the discriminant of the pointed-to enum value and return it
// zero-extended to the destination's size.
228 "discriminant_value" => {
229 let place = this.deref_operand(args[0])?;
230 let discr_val = this.read_discriminant(place.into())?.0;
231 this.write_scalar(Scalar::from_uint(discr_val, dest.layout.size), dest)?;
// Unary f32 math intrinsics, evaluated with the *host's* f32 operations on
// the raw bit pattern -- host rounding/precision can leak into the
// interpreted program (hence the FIXME below).
234 "sinf32" | "fabsf32" | "cosf32" | "sqrtf32" | "expf32" | "exp2f32" | "logf32" |
235 "log10f32" | "log2f32" | "floorf32" | "ceilf32" | "truncf32" | "roundf32" => {
236 // FIXME: Using host floats.
237 let f = f32::from_bits(this.read_scalar(args[0])?.to_u32()?);
238 let f = match intrinsic_name {
240 "fabsf32" => f.abs(),
242 "sqrtf32" => f.sqrt(),
244 "exp2f32" => f.exp2(),
246 "log10f32" => f.log10(),
247 "log2f32" => f.log2(),
248 "floorf32" => f.floor(),
249 "ceilf32" => f.ceil(),
250 "truncf32" => f.trunc(),
251 "roundf32" => f.round(),
// Result is written back as raw bits, mirroring the bit-level read above.
254 this.write_scalar(Scalar::from_u32(f.to_bits()), dest)?;
// The f64 twin of the arm above, identical except for the width.
257 "sinf64" | "fabsf64" | "cosf64" | "sqrtf64" | "expf64" | "exp2f64" | "logf64" |
258 "log10f64" | "log2f64" | "floorf64" | "ceilf64" | "truncf64" | "roundf64" => {
259 // FIXME: Using host floats.
260 let f = f64::from_bits(this.read_scalar(args[0])?.to_u64()?);
261 let f = match intrinsic_name {
263 "fabsf64" => f.abs(),
265 "sqrtf64" => f.sqrt(),
267 "exp2f64" => f.exp2(),
269 "log10f64" => f.log10(),
270 "log2f64" => f.log2(),
271 "floorf64" => f.floor(),
272 "ceilf64" => f.ceil(),
273 "truncf64" => f.trunc(),
274 "roundf64" => f.round(),
277 this.write_scalar(Scalar::from_u64(f.to_bits()), dest)?;
// "Fast" float arithmetic: lowered to the ordinary MIR binop (the extra
// latitude the `_fast` intrinsics grant LLVM is simply not exploited here).
280 "fadd_fast" | "fsub_fast" | "fmul_fast" | "fdiv_fast" | "frem_fast" => {
281 let a = this.read_immediate(args[0])?;
282 let b = this.read_immediate(args[1])?;
283 let op = match intrinsic_name {
284 "fadd_fast" => mir::BinOp::Add,
285 "fsub_fast" => mir::BinOp::Sub,
286 "fmul_fast" => mir::BinOp::Mul,
287 "fdiv_fast" => mir::BinOp::Div,
288 "frem_fast" => mir::BinOp::Rem,
291 this.binop_ignore_overflow(op, a, b, dest)?;
// minnum/maxnum for f32, computed via rustc_apfloat's soft-float `to_f32`
// values rather than host floats.
294 "minnumf32" | "maxnumf32" => {
295 let a = this.read_scalar(args[0])?.to_f32()?;
296 let b = this.read_scalar(args[1])?.to_f32()?;
// `min...` selects the minnum variant; the actual min/max expressions sit on
// lines missing from this excerpt.
297 let res = if intrinsic_name.starts_with("min") {
302 this.write_scalar(Scalar::from_f32(res), dest)?;
// The f64 twin of the arm above.
305 "minnumf64" | "maxnumf64" => {
306 let a = this.read_scalar(args[0])?.to_f64()?;
307 let b = this.read_scalar(args[1])?.to_f64()?;
308 let res = if intrinsic_name.starts_with("min") {
313 this.write_scalar(Scalar::from_f64(res), dest)?;
// NOTE(review): these two immediate reads belong to an arm whose label and
// operation (original lines ~314-324) are not visible in this excerpt.
318 this.read_immediate(args[0])?,
319 this.read_immediate(args[1])?,
// Branch-prediction hints carry no semantic weight for interpretation.
325 "likely" | "unlikely" => {
326 // These just return their argument
327 let b = this.read_immediate(args[0])?;
328 this.write_immediate(*b, dest)?;
// Zero-initialization (presumably the `init` intrinsic; its arm label is not
// visible here): write zeroes immediately for scalar / scalar-pair ABIs,
// otherwise zero the backing allocation byte by byte.
332 // Check fast path: we don't want to force an allocation in case the destination is a simple value,
333 // but we also do not want to create a new allocation with 0s and then copy that over.
334 // FIXME: We do not properly validate in case of ZSTs and when doing it in memory!
335 // However, this only affects direct calls of the intrinsic; calls to the stable
336 // functions wrapping them do get their validation.
337 // FIXME: should we check that the destination pointer is aligned even for ZSTs?
338 if !dest.layout.is_zst() {
339 match dest.layout.abi {
340 layout::Abi::Scalar(ref s) => {
341 let x = Scalar::from_int(0, s.value.size(this));
342 this.write_scalar(x, dest)?;
344 layout::Abi::ScalarPair(ref s1, ref s2) => {
345 let x = Scalar::from_int(0, s1.value.size(this));
346 let y = Scalar::from_int(0, s2.value.size(this));
347 this.write_immediate(Immediate::ScalarPair(x.into(), y.into()), dest)?;
// Slow path: force the destination into memory and overwrite it with zeroes.
351 let mplace = this.force_allocation(dest)?;
352 mplace.meta.unwrap_none(); // must be sized
353 this.memory.write_bytes(mplace.ptr, iter::repeat(0u8).take(dest.layout.size.bytes() as usize))?;
// Preferred alignment of a type, returned as a pointer-sized integer. Note
// this reads `align.pref`, not `align.abi` -- presumably `min_align_of` /
// `pref_align_of`; the arm label is not visible here.
360 let ty = substs.type_at(0);
361 let layout = this.layout_of(ty)?;
362 let align = layout.align.pref.bytes();
363 let ptr_size = this.pointer_size();
364 let align_val = Scalar::from_uint(align as u128, ptr_size);
365 this.write_scalar(align_val, dest)?;
// Store a value through a pointer (presumably `move_val_init`).
369 let place = this.deref_operand(args[0])?;
370 this.copy_op(args[1], place.into())?;
// In-bounds pointer offset (presumably the `offset` intrinsic): unlike the
// wrapping arm near the top, this one goes through the checked helper.
374 let offset = this.read_scalar(args[1])?.to_machine_isize(this)?;
375 let ptr = this.read_scalar(args[0])?.not_undef()?;
376 let result_ptr = this.pointer_offset_inbounds(ptr, substs.type_at(0), offset)?;
377 this.write_scalar(result_ptr, dest)?;
// Instantiating an uninhabited type is rejected outright (should become an
// interpreted panic eventually -- see the FIXME).
380 "panic_if_uninhabited" => {
381 let ty = substs.type_at(0);
382 let layout = this.layout_of(ty)?;
383 if layout.abi.is_uninhabited() {
384 // FIXME: This should throw a panic in the interpreted program instead.
385 throw_unsup_format!("Trying to instantiate uninhabited type {}", ty)
// powf for f32: computed with host floats on the raw bits (arm label not
// visible in this excerpt).
390 // FIXME: Using host floats.
391 let f = f32::from_bits(this.read_scalar(args[0])?.to_u32()?);
392 let f2 = f32::from_bits(this.read_scalar(args[1])?.to_u32()?);
394 Scalar::from_u32(f.powf(f2).to_bits()),
// powf for f64, same approach.
400 // FIXME: Using host floats.
401 let f = f64::from_bits(this.read_scalar(args[0])?.to_u64()?);
402 let f2 = f64::from_bits(this.read_scalar(args[1])?.to_u64()?);
404 Scalar::from_u64(f.powf(f2).to_bits()),
// Fused multiply-add for f32 via rustc_apfloat's soft-float `mul_add`
// (note: NOT host floats -- `.value` discards the apfloat status flags).
410 let a = this.read_scalar(args[0])?.to_f32()?;
411 let b = this.read_scalar(args[1])?.to_f32()?;
412 let c = this.read_scalar(args[2])?.to_f32()?;
413 let res = a.mul_add(b, c).value;
415 Scalar::from_f32(res),
// Fused multiply-add for f64, same approach.
421 let a = this.read_scalar(args[0])?.to_f64()?;
422 let b = this.read_scalar(args[1])?.to_f64()?;
423 let c = this.read_scalar(args[2])?.to_f64()?;
424 let res = a.mul_add(b, c).value;
426 Scalar::from_f64(res),
// powi for f32 (integer exponent), host floats again.
432 // FIXME: Using host floats.
433 let f = f32::from_bits(this.read_scalar(args[0])?.to_u32()?);
434 let i = this.read_scalar(args[1])?.to_i32()?;
436 Scalar::from_u32(f.powi(i).to_bits()),
// powi for f64.
442 // FIXME: Using host floats.
443 let f = f64::from_bits(this.read_scalar(args[0])?.to_u64()?);
444 let i = this.read_scalar(args[1])?.to_i32()?;
446 Scalar::from_u64(f.powi(i).to_bits()),
// Dynamic size of the pointed-to value (presumably `size_of_val`); extern
// types have no size, hence the expect.
452 let mplace = this.deref_operand(args[0])?;
453 let (size, _) = this.size_and_align_of_mplace(mplace)?
454 .expect("size_of_val called on extern type");
455 let ptr_size = this.pointer_size();
457 Scalar::from_uint(size.bytes() as u128, ptr_size),
// Dynamic alignment of the pointed-to value (presumably `min_align_of_val`).
464 let mplace = this.deref_operand(args[0])?;
465 let (_, align) = this.size_and_align_of_mplace(mplace)?
// NOTE(review): this expect message says "size_of_val" but this arm extracts
// the alignment -- looks like a stale copy-paste; fixing the string is a code
// change and unsafe to make from this excerpt.
466 .expect("size_of_val called on extern type");
467 let ptr_size = this.pointer_size();
469 Scalar::from_uint(align.bytes(), ptr_size),
// unchecked_div: division by zero is UB (the `if rval == 0` guard line is
// not visible in this excerpt), otherwise a plain MIR Div.
475 let l = this.read_immediate(args[0])?;
476 let r = this.read_immediate(args[1])?;
477 let rval = r.to_scalar()?.to_bits(args[1].layout.size)?;
479 throw_ub_format!("Division by 0 in unchecked_div");
481 this.binop_ignore_overflow(
// unchecked_rem: same shape as unchecked_div.
490 let l = this.read_immediate(args[0])?;
491 let r = this.read_immediate(args[1])?;
492 let rval = r.to_scalar()?.to_bits(args[1].layout.size)?;
494 throw_ub_format!("Division by 0 in unchecked_rem");
496 this.binop_ignore_overflow(
// unchecked add/sub/mul: overflow is UB, detected via the overflowing binop
// (the `if overflowed` guard is presumably on a missing line).
504 "unchecked_add" | "unchecked_sub" | "unchecked_mul" => {
505 let l = this.read_immediate(args[0])?;
506 let r = this.read_immediate(args[1])?;
507 let op = match intrinsic_name {
508 "unchecked_add" => mir::BinOp::Add,
509 "unchecked_sub" => mir::BinOp::Sub,
510 "unchecked_mul" => mir::BinOp::Mul,
513 let (res, overflowed, _ty) = this.overflowing_binary_op(op, l, r)?;
515 throw_ub_format!("Overflowing arithmetic in {}", intrinsic_name);
517 this.write_scalar(res, dest)?;
// `uninit`-style arm: mark the destination undefined, mirroring the
// zero-init fast path earlier in the match.
521 // Check fast path: we don't want to force an allocation in case the destination is a simple value,
522 // but we also do not want to create a new allocation with 0s and then copy that over.
523 // FIXME: We do not properly validate in case of ZSTs and when doing it in memory!
524 // However, this only affects direct calls of the intrinsic; calls to the stable
525 // functions wrapping them do get their validation.
526 // FIXME: should we check alignment for ZSTs?
527 use crate::ScalarMaybeUndef;
528 if !dest.layout.is_zst() {
529 match dest.layout.abi {
530 layout::Abi::Scalar(..) => {
531 let x = ScalarMaybeUndef::Undef;
532 this.write_immediate(Immediate::Scalar(x), dest)?;
534 layout::Abi::ScalarPair(..) => {
535 let x = ScalarMaybeUndef::Undef;
536 this.write_immediate(Immediate::ScalarPair(x, x), dest)?;
// Slow path: force an allocation and flip its definedness mask to undefined.
540 let mplace = this.force_allocation(dest)?;
541 mplace.meta.unwrap_none();
542 let ptr = mplace.ptr.to_ptr()?;
543 // We know the return place is in-bounds
545 .get_raw_mut(ptr.alloc_id)?
546 .mark_definedness(ptr, dest.layout.size, false);
// write_bytes: fill `count` elements' worth of memory with a repeated byte.
553 let ty = substs.type_at(0);
554 let ty_layout = this.layout_of(ty)?;
555 let val_byte = this.read_scalar(args[1])?.to_u8()?;
556 let ptr = this.read_scalar(args[0])?.not_undef()?;
557 let count = this.read_scalar(args[2])?.to_machine_usize(this)?;
558 let byte_count = ty_layout.size * count;
559 this.memory.write_bytes(ptr, iter::repeat(val_byte).take(byte_count.bytes() as usize))?;
// Anything not handled above is reported as an unimplemented intrinsic.
562 name => throw_unsup_format!("unimplemented intrinsic: {}", name),