2 use std::convert::TryFrom;
5 use rustc::mir::interpret::{InterpResult, PointerArithmetic};
7 use rustc::ty::layout::{Align, LayoutOf, Size};
8 use rustc_apfloat::Float;
9 use rustc_span::source_map::Span;
// Wire the intrinsic-evaluation extension trait onto Miri's concrete evaluation context.
13 impl<'mir, 'tcx> EvalContextExt<'mir, 'tcx> for crate::MiriEvalContext<'mir, 'tcx> {}
14 pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx> {
// NOTE(review): the `fn` header line is not visible in this excerpt. These are its
// parameters: the resolved intrinsic instance, the argument operands, the optional
// (return place, return block) pair, and the optional unwind target block.
18 instance: ty::Instance<'tcx>,
19 args: &[OpTy<'tcx, Tag>],
20 ret: Option<(PlaceTy<'tcx, Tag>, mir::BasicBlock)>,
21 unwind: Option<mir::BasicBlock>,
22 ) -> InterpResult<'tcx> {
23 let this = self.eval_context_mut();
// First defer to the intrinsics the core interpreter engine already emulates;
// `?` propagates interpreter errors. The body of this `if` (presumably an early
// return on success) is not visible in this excerpt — confirm against upstream.
24 if this.emulate_intrinsic(span, instance, args, ret)? {
27 let tcx = &{ this.tcx.tcx };
28 let substs = instance.substs;
30 // All these intrinsics take raw pointers, so if we access memory directly
31 // (as opposed to through a place), we have to remember to erase any tag
32 // that might still hang around!
// Dispatch on the intrinsic's item name (a plain `&str`) below.
33 let intrinsic_name = &*tcx.item_name(instance.def_id()).as_str();
35 // First handle intrinsics without return place.
36 let (dest, ret) = match ret {
37 None => match intrinsic_name {
// `miri_start_panic` begins unwinding and never returns normally, hence the `return`.
38 "miri_start_panic" => return this.handle_miri_start_panic(args, unwind),
// Any other diverging intrinsic is not supported yet.
39 _ => throw_unsup_format!("unimplemented (diverging) intrinsic: {}", intrinsic_name),
// Main dispatch for intrinsics that do have a return place.
44 match intrinsic_name {
// `try` (panic-catching support) has its own out-of-line handler.
45 "try" => return this.handle_try(args, dest, ret),
// Wrapping pointer arithmetic: scale the element offset by the pointee size and
// apply it without an inbounds requirement. (The arm's name line is not visible
// in this excerpt — presumably the `ptr_offset`/wrapping-offset intrinsic.)
48 let offset = this.read_scalar(args[1])?.to_machine_isize(this)?;
49 let ptr = this.read_scalar(args[0])?.not_undef()?;
51 let pointee_ty = substs.type_at(0);
52 let pointee_size = i64::try_from(this.layout_of(pointee_ty)?.size.bytes()).unwrap();
// Deliberately wrapping multiplication: overflow is permitted for this operation.
53 let offset = offset.overflowing_mul(pointee_size).0;
54 let result_ptr = ptr.ptr_wrapping_signed_offset(offset, this);
55 this.write_scalar(result_ptr, dest)?;
// `assume`: passing `false` is undefined behavior, which Miri reports as UB.
59 let cond = this.read_scalar(args[0])?.to_bool()?;
61 throw_ub_format!("`assume` intrinsic called with `false`");
// Load through the pointer operand into the return place
// (presumably the volatile-load intrinsic — arm name not visible here).
66 let place = this.deref_operand(args[0])?;
67 this.copy_op(place.into(), dest)?;
// Store the second operand through the pointer operand
// (presumably the volatile-store intrinsic — arm name not visible here).
71 let place = this.deref_operand(args[0])?;
72 this.copy_op(args[1], place.into())?;
// Atomic loads (all orderings collapse to a plain read, see fence comment below).
77 | "atomic_load_relaxed"
80 let place = this.deref_operand(args[0])?;
81 let val = this.read_scalar(place.into())?; // make sure it fits into a scalar; otherwise it cannot be atomic
83 // Check alignment requirements. Atomics must always be aligned to their size,
84 // even if the type they wrap would be less aligned (e.g. AtomicU64 on 32bit must
86 let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
87 this.memory.check_ptr_access(place.ptr, place.layout.size, align)?;
89 this.write_scalar(val, dest)?;
// Atomic stores: write the second operand into the pointed-to place.
94 | "atomic_store_relaxed"
97 let place = this.deref_operand(args[0])?;
98 let val = this.read_scalar(args[1])?; // make sure it fits into a scalar; otherwise it cannot be atomic
100 // Check alignment requirements. Atomics must always be aligned to their size,
101 // even if the type they wrap would be less aligned (e.g. AtomicU64 on 32bit must
103 let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
104 this.memory.check_ptr_access(place.ptr, place.layout.size, align)?;
106 this.write_scalar(val, place.into())?;
// Memory fences of every ordering, thread-local and global:
112 | "atomic_fence_acqrel"
114 | "atomic_singlethreadfence_acq"
115 | "atomic_singlethreadfence_rel"
116 | "atomic_singlethreadfence_acqrel"
117 | "atomic_singlethreadfence"
119 // we are inherently singlethreaded and singlecored, this is a nop
// Atomic exchange: store `new` into the place and return the previous value.
122 _ if intrinsic_name.starts_with("atomic_xchg") => {
123 let place = this.deref_operand(args[0])?;
124 let new = this.read_scalar(args[1])?;
125 let old = this.read_scalar(place.into())?;
127 // Check alignment requirements. Atomics must always be aligned to their size,
128 // even if the type they wrap would be less aligned (e.g. AtomicU64 on 32bit must
130 let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
131 this.memory.check_ptr_access(place.ptr, place.layout.size, align)?;
133 this.write_scalar(old, dest)?; // old value is returned
134 this.write_scalar(new, place.into())?;
// Atomic compare-and-exchange: returns the pair (old value, success flag) and
// conditionally stores `new`. The guard that makes the store conditional on `eq`
// is not visible in this excerpt (only the store at original line 156 is).
137 _ if intrinsic_name.starts_with("atomic_cxchg") => {
138 let place = this.deref_operand(args[0])?;
139 let expect_old = this.read_immediate(args[1])?; // read as immediate for the sake of `binary_op()`
140 let new = this.read_scalar(args[2])?;
141 let old = this.read_immediate(place.into())?; // read as immediate for the sake of `binary_op()`
143 // Check alignment requirements. Atomics must always be aligned to their size,
144 // even if the type they wrap would be less aligned (e.g. AtomicU64 on 32bit must
146 let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
147 this.memory.check_ptr_access(place.ptr, place.layout.size, align)?;
149 // `binary_op` will bail if either of them is not a scalar.
150 let eq = this.overflowing_binary_op(mir::BinOp::Eq, old, expect_old)?.0;
// The return value is a scalar pair: (previous value, comparison result).
151 let res = Immediate::ScalarPair(old.to_scalar_or_undef(), eq.into());
153 this.write_immediate(res, dest)?;
154 // Update ptr depending on comparison.
156 this.write_scalar(new, place.into())?;
// Atomic read-modify-write family: or/xor/and/nand/xadd/xsub in every ordering.
165 | "atomic_or_relaxed"
169 | "atomic_xor_acqrel"
170 | "atomic_xor_relaxed"
174 | "atomic_and_acqrel"
175 | "atomic_and_relaxed"
179 | "atomic_nand_acqrel"
180 | "atomic_nand_relaxed"
184 | "atomic_xadd_acqrel"
185 | "atomic_xadd_relaxed"
189 | "atomic_xsub_acqrel"
190 | "atomic_xsub_relaxed"
192 let place = this.deref_operand(args[0])?;
// These intrinsics are only defined for integers; anything else is a compiler bug.
193 if !place.layout.ty.is_integral() {
194 bug!("Atomic arithmetic operations only work on integer types");
196 let rhs = this.read_immediate(args[1])?;
197 let old = this.read_immediate(place.into())?;
199 // Check alignment requirements. Atomics must always be aligned to their size,
200 // even if the type they wrap would be less aligned (e.g. AtomicU64 on 32bit must
202 let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
203 this.memory.check_ptr_access(place.ptr, place.layout.size, align)?;
205 this.write_immediate(*old, dest)?; // old value is returned
// Pick the MIR binary op from the middle segment of the intrinsic name
// ("atomic_<op>_<ordering>"); `neg` marks `nand`, which is `!(a & b)`.
206 let (op, neg) = match intrinsic_name.split('_').nth(1).unwrap() {
207 "or" => (mir::BinOp::BitOr, false),
208 "xor" => (mir::BinOp::BitXor, false),
209 "and" => (mir::BinOp::BitAnd, false),
210 "xadd" => (mir::BinOp::Add, false),
211 "xsub" => (mir::BinOp::Sub, false),
212 "nand" => (mir::BinOp::BitAnd, true),
215 // Atomics wrap around on overflow.
216 let val = this.binary_op(op, old, rhs)?;
// For `nand`, apply bitwise NOT to the AND result before storing it back.
217 let val = if neg { this.unary_op(mir::UnOp::Not, val)? } else { val };
218 this.write_immediate(*val, place.into())?;
// `breakpoint` should halt the interpreter; not implemented yet.
221 "breakpoint" => unimplemented!(), // halt miri
// `copy` / `copy_nonoverlapping`: bulk copy of `count` elements of type T.
225 | "copy_nonoverlapping"
227 let elem_ty = substs.type_at(0);
228 let elem_layout = this.layout_of(elem_ty)?;
229 let elem_size = elem_layout.size.bytes();
230 let count = this.read_scalar(args[2])?.to_machine_usize(this)?;
231 let elem_align = elem_layout.align.abi;
// Total byte size of the transfer = element size * element count.
233 let size = Size::from_bytes(count) * elem_size;
234 let src = this.read_scalar(args[0])?.not_undef()?;
// `check_ptr_access` also validates alignment; a `None` result means the
// access is zero-sized, in which case there is nothing to copy.
235 let src = this.memory.check_ptr_access(src, size, elem_align)?;
236 let dest = this.read_scalar(args[1])?.not_undef()?;
237 let dest = this.memory.check_ptr_access(dest, size, elem_align)?;
239 if let (Some(src), Some(dest)) = (src, dest) {
// The copy call itself is partly outside this excerpt; this flag tells it
// whether the `_nonoverlapping` variant's no-overlap requirement applies.
244 intrinsic_name.ends_with("_nonoverlapping"),
// Unary f32 math intrinsics, evaluated by round-tripping the bit pattern
// through the host's `f32` type.
264 // FIXME: Using host floats.
265 let f = f32::from_bits(this.read_scalar(args[0])?.to_u32()?);
266 let f = match intrinsic_name {
268 "fabsf32" => f.abs(),
270 "sqrtf32" => f.sqrt(),
272 "exp2f32" => f.exp2(),
274 "log10f32" => f.log10(),
275 "log2f32" => f.log2(),
276 "floorf32" => f.floor(),
277 "ceilf32" => f.ceil(),
278 "truncf32" => f.trunc(),
279 "roundf32" => f.round(),
282 this.write_scalar(Scalar::from_u32(f.to_bits()), dest)?;
// Same as above for the f64 variants.
300 // FIXME: Using host floats.
301 let f = f64::from_bits(this.read_scalar(args[0])?.to_u64()?);
302 let f = match intrinsic_name {
304 "fabsf64" => f.abs(),
306 "sqrtf64" => f.sqrt(),
308 "exp2f64" => f.exp2(),
310 "log10f64" => f.log10(),
311 "log2f64" => f.log2(),
312 "floorf64" => f.floor(),
313 "ceilf64" => f.ceil(),
314 "truncf64" => f.trunc(),
315 "roundf64" => f.round(),
318 this.write_scalar(Scalar::from_u64(f.to_bits()), dest)?;
// "Fast" float arithmetic: mapped to the ordinary MIR binary operation
// (any additional no-NaN/no-inf checking is not visible in this excerpt).
328 let a = this.read_immediate(args[0])?;
329 let b = this.read_immediate(args[1])?;
330 let op = match intrinsic_name {
331 "fadd_fast" => mir::BinOp::Add,
332 "fsub_fast" => mir::BinOp::Sub,
333 "fmul_fast" => mir::BinOp::Mul,
334 "fdiv_fast" => mir::BinOp::Div,
335 "frem_fast" => mir::BinOp::Rem,
338 this.binop_ignore_overflow(op, a, b, dest)?;
// min/max/copysign for f32, computed with soft-float (`rustc_apfloat`) semantics
// rather than host floats.
346 let a = this.read_scalar(args[0])?.to_f32()?;
347 let b = this.read_scalar(args[1])?.to_f32()?;
348 let res = match intrinsic_name {
349 "minnumf32" => a.min(b),
350 "maxnumf32" => a.max(b),
351 "copysignf32" => a.copy_sign(b),
354 this.write_scalar(Scalar::from_f32(res), dest)?;
// Same as above for f64.
362 let a = this.read_scalar(args[0])?.to_f64()?;
363 let b = this.read_scalar(args[1])?.to_f64()?;
364 let res = match intrinsic_name {
365 "minnumf64" => a.min(b),
366 "maxnumf64" => a.max(b),
367 "copysignf64" => a.copy_sign(b),
370 this.write_scalar(Scalar::from_f64(res), dest)?;
// `exact_div`: delegated to the shared engine helper (UB if division is inexact).
374 this.exact_div(this.read_immediate(args[0])?, this.read_immediate(args[1])?, dest)?,
382 // These just return their argument
383 let b = this.read_immediate(args[0])?;
384 this.write_immediate(*b, dest)?;
// Alignment query returning the *preferred* (not ABI) alignment of T —
// note `align.pref` below; presumably the `pref_align_of` intrinsic (arm name
// not visible in this excerpt).
388 let ty = substs.type_at(0);
389 let layout = this.layout_of(ty)?;
390 let align = layout.align.pref.bytes();
391 let ptr_size = this.pointer_size();
// The result is a pointer-sized unsigned integer.
392 let align_val = Scalar::from_uint(align, ptr_size);
393 this.write_scalar(align_val, dest)?;
// Store the second operand through the pointer operand (arm name not visible).
397 let place = this.deref_operand(args[0])?;
398 this.copy_op(args[1], place.into())?;
// Inbounds pointer offset: unlike the wrapping variant above, this one goes
// through `pointer_offset_inbounds`, which can fail for out-of-bounds results.
402 let offset = this.read_scalar(args[1])?.to_machine_isize(this)?;
403 let ptr = this.read_scalar(args[0])?.not_undef()?;
404 let result_ptr = this.pointer_offset_inbounds(ptr, substs.type_at(0), offset)?;
405 this.write_scalar(result_ptr, dest)?;
// Validity-assertion intrinsics: trigger a Rust panic (not an interpreter error)
// when instantiating T the requested way would be invalid.
409 "assert_zero_valid" |
410 "assert_uninit_valid" => {
411 let ty = substs.type_at(0);
412 let layout = this.layout_of(ty)?;
413 // Return here because we panicked instead of returning normally from the intrinsic.
414 if layout.abi.is_uninhabited() {
415 return this.start_panic(&format!("attempted to instantiate uninhabited type `{}`", ty), unwind);
417 if intrinsic_name == "assert_zero_valid" && !layout.might_permit_raw_init(this, /*zero:*/ true).unwrap() {
418 return this.start_panic(&format!("attempted to zero-initialize type `{}`, which is invalid", ty), unwind);
420 if intrinsic_name == "assert_uninit_valid" && !layout.might_permit_raw_init(this, /*zero:*/ false).unwrap() {
421 return this.start_panic(&format!("attempted to leave type `{}` uninitialized, which is invalid", ty), unwind);
// `powf` for f32, computed with host floats (bit-pattern round trip).
426 // FIXME: Using host floats.
427 let f = f32::from_bits(this.read_scalar(args[0])?.to_u32()?);
428 let f2 = f32::from_bits(this.read_scalar(args[1])?.to_u32()?);
429 this.write_scalar(Scalar::from_u32(f.powf(f2).to_bits()), dest)?;
// `powf` for f64, same approach.
433 // FIXME: Using host floats.
434 let f = f64::from_bits(this.read_scalar(args[0])?.to_u64()?);
435 let f2 = f64::from_bits(this.read_scalar(args[1])?.to_u64()?);
436 this.write_scalar(Scalar::from_u64(f.powf(f2).to_bits()), dest)?;
// Fused multiply-add (f32) via soft-float `mul_add`; `.value` drops the
// apfloat status flags and keeps only the result.
440 let a = this.read_scalar(args[0])?.to_f32()?;
441 let b = this.read_scalar(args[1])?.to_f32()?;
442 let c = this.read_scalar(args[2])?.to_f32()?;
443 let res = a.mul_add(b, c).value;
444 this.write_scalar(Scalar::from_f32(res), dest)?;
// Fused multiply-add (f64), same approach.
448 let a = this.read_scalar(args[0])?.to_f64()?;
449 let b = this.read_scalar(args[1])?.to_f64()?;
450 let c = this.read_scalar(args[2])?.to_f64()?;
451 let res = a.mul_add(b, c).value;
452 this.write_scalar(Scalar::from_f64(res), dest)?;
// `powi` for f32: float base, i32 exponent, host-float evaluation.
456 // FIXME: Using host floats.
457 let f = f32::from_bits(this.read_scalar(args[0])?.to_u32()?);
458 let i = this.read_scalar(args[1])?.to_i32()?;
459 this.write_scalar(Scalar::from_u32(f.powi(i).to_bits()), dest)?;
// `powi` for f64, same approach.
463 // FIXME: Using host floats.
464 let f = f64::from_bits(this.read_scalar(args[0])?.to_u64()?);
465 let i = this.read_scalar(args[1])?.to_i32()?;
466 this.write_scalar(Scalar::from_u64(f.powi(i).to_bits()), dest)?;
// `size_of_val`: dynamic size of the pointed-to value; `expect` fires for
// extern types, whose size is unknowable.
470 let mplace = this.deref_operand(args[0])?;
472 .size_and_align_of_mplace(mplace)?
473 .expect("size_of_val called on extern type");
474 let ptr_size = this.pointer_size();
475 this.write_scalar(Scalar::from_uint(size.bytes(), ptr_size), dest)?;
// Dynamic alignment of the pointed-to value (the `min_align_of_val` family —
// arm name not visible in this excerpt, but the branch reads and returns
// `align`, not `size`).
482 let mplace = this.deref_operand(args[0])?;
483 let (_, align) = this
484 .size_and_align_of_mplace(mplace)?
// Fix: the panic message previously said "size_of_val", copy-pasted from the
// size branch above; this branch computes the alignment, so name it correctly.
485 .expect("align_of_val called on extern type");
486 let ptr_size = this.pointer_size();
// The result is a pointer-sized unsigned integer holding the alignment in bytes.
487 this.write_scalar(Scalar::from_uint(align.bytes(), ptr_size), dest)?;
// `write_bytes`: fill `count` elements of type T with the byte `val_byte`.
491 let ty = substs.type_at(0);
492 let ty_layout = this.layout_of(ty)?;
493 let val_byte = this.read_scalar(args[1])?.to_u8()?;
494 let ptr = this.read_scalar(args[0])?.not_undef()?;
495 let count = this.read_scalar(args[2])?.to_machine_usize(this)?;
// Total number of bytes to write = size_of::<T>() * count.
496 let byte_count = ty_layout.size * count;
// Emit the fill as a repeated-byte iterator; the cast to usize presumably
// cannot truncate here since the count came from a machine-usize — confirm.
498 .write_bytes(ptr, iter::repeat(val_byte).take(byte_count.bytes() as usize))?;
// Anything not matched above is an intrinsic Miri does not support yet.
501 name => throw_unsup_format!("unimplemented intrinsic: {}", name),
// Common epilogue for all non-diverging intrinsics: dump the destination
// (for tracing) and transfer control to the return basic block.
504 this.dump_place(*dest);
505 this.go_to_block(ret);