2 use std::convert::TryFrom;
5 use rustc::mir::interpret::{InterpResult, PointerArithmetic};
7 use rustc::ty::layout::{Align, LayoutOf};
8 use rustc_apfloat::Float;
9 use rustc_span::source_map::Span;
// Attach the `EvalContextExt` extension trait to Miri's evaluation context.
// The impl body is intentionally empty: every method is provided as a default
// method on the trait below, so this line only opts `MiriEvalContext` in.
13 impl<'mir, 'tcx> EvalContextExt<'mir, 'tcx> for crate::MiriEvalContext<'mir, 'tcx> {}
14 pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx> {
18 instance: ty::Instance<'tcx>,
19 args: &[OpTy<'tcx, Tag>],
20 ret: Option<(PlaceTy<'tcx, Tag>, mir::BasicBlock)>,
21 unwind: Option<mir::BasicBlock>,
22 ) -> InterpResult<'tcx> {
23 let this = self.eval_context_mut();
24 if this.emulate_intrinsic(span, instance, args, ret)? {
27 let tcx = &{ this.tcx.tcx };
28 let substs = instance.substs;
30 // All these intrinsics take raw pointers, so if we access memory directly
31 // (as opposed to through a place), we have to remember to erase any tag
32 // that might still hang around!
33 let intrinsic_name = &*tcx.item_name(instance.def_id()).as_str();
35 // First handle intrinsics without return place.
36 let (dest, ret) = match ret {
37 None => match intrinsic_name {
38 "miri_start_panic" => return this.handle_miri_start_panic(args, unwind),
39 "unreachable" => throw_ub!(Unreachable),
40 _ => throw_unsup_format!("unimplemented (diverging) intrinsic: {}", intrinsic_name),
45 match intrinsic_name {
46 "try" => return this.handle_try(args, dest, ret),
49 let offset = this.read_scalar(args[1])?.to_machine_isize(this)?;
50 let ptr = this.read_scalar(args[0])?.not_undef()?;
52 let pointee_ty = substs.type_at(0);
53 let pointee_size = i64::try_from(this.layout_of(pointee_ty)?.size.bytes()).unwrap();
54 let offset = offset.overflowing_mul(pointee_size).0;
55 let result_ptr = ptr.ptr_wrapping_signed_offset(offset, this);
56 this.write_scalar(result_ptr, dest)?;
60 let cond = this.read_scalar(args[0])?.to_bool()?;
62 throw_ub_format!("`assume` intrinsic called with `false`");
67 let place = this.deref_operand(args[0])?;
68 this.copy_op(place.into(), dest)?;
72 let place = this.deref_operand(args[0])?;
73 this.copy_op(args[1], place.into())?;
78 | "atomic_load_relaxed"
81 let place = this.deref_operand(args[0])?;
82 let val = this.read_scalar(place.into())?; // make sure it fits into a scalar; otherwise it cannot be atomic
84 // Check alignment requirements. Atomics must always be aligned to their size,
85 // even if the type they wrap would be less aligned (e.g. AtomicU64 on 32bit must
87 let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
88 this.memory.check_ptr_access(place.ptr, place.layout.size, align)?;
90 this.write_scalar(val, dest)?;
95 | "atomic_store_relaxed"
98 let place = this.deref_operand(args[0])?;
99 let val = this.read_scalar(args[1])?; // make sure it fits into a scalar; otherwise it cannot be atomic
101 // Check alignment requirements. Atomics must always be aligned to their size,
102 // even if the type they wrap would be less aligned (e.g. AtomicU64 on 32bit must
104 let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
105 this.memory.check_ptr_access(place.ptr, place.layout.size, align)?;
107 this.write_scalar(val, place.into())?;
113 | "atomic_fence_acqrel"
115 | "atomic_singlethreadfence_acq"
116 | "atomic_singlethreadfence_rel"
117 | "atomic_singlethreadfence_acqrel"
118 | "atomic_singlethreadfence"
120 // we are inherently singlethreaded and singlecored, this is a nop
123 _ if intrinsic_name.starts_with("atomic_xchg") => {
124 let place = this.deref_operand(args[0])?;
125 let new = this.read_scalar(args[1])?;
126 let old = this.read_scalar(place.into())?;
128 // Check alignment requirements. Atomics must always be aligned to their size,
129 // even if the type they wrap would be less aligned (e.g. AtomicU64 on 32bit must
131 let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
132 this.memory.check_ptr_access(place.ptr, place.layout.size, align)?;
134 this.write_scalar(old, dest)?; // old value is returned
135 this.write_scalar(new, place.into())?;
138 _ if intrinsic_name.starts_with("atomic_cxchg") => {
139 let place = this.deref_operand(args[0])?;
140 let expect_old = this.read_immediate(args[1])?; // read as immediate for the sake of `binary_op()`
141 let new = this.read_scalar(args[2])?;
142 let old = this.read_immediate(place.into())?; // read as immediate for the sake of `binary_op()`
144 // Check alignment requirements. Atomics must always be aligned to their size,
145 // even if the type they wrap would be less aligned (e.g. AtomicU64 on 32bit must
147 let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
148 this.memory.check_ptr_access(place.ptr, place.layout.size, align)?;
150 // `binary_op` will bail if either of them is not a scalar.
151 let eq = this.overflowing_binary_op(mir::BinOp::Eq, old, expect_old)?.0;
152 let res = Immediate::ScalarPair(old.to_scalar_or_undef(), eq.into());
154 this.write_immediate(res, dest)?;
155 // Update ptr depending on comparison.
157 this.write_scalar(new, place.into())?;
166 | "atomic_or_relaxed"
170 | "atomic_xor_acqrel"
171 | "atomic_xor_relaxed"
175 | "atomic_and_acqrel"
176 | "atomic_and_relaxed"
180 | "atomic_nand_acqrel"
181 | "atomic_nand_relaxed"
185 | "atomic_xadd_acqrel"
186 | "atomic_xadd_relaxed"
190 | "atomic_xsub_acqrel"
191 | "atomic_xsub_relaxed"
193 let place = this.deref_operand(args[0])?;
194 if !place.layout.ty.is_integral() {
195 bug!("Atomic arithmetic operations only work on integer types");
197 let rhs = this.read_immediate(args[1])?;
198 let old = this.read_immediate(place.into())?;
200 // Check alignment requirements. Atomics must always be aligned to their size,
201 // even if the type they wrap would be less aligned (e.g. AtomicU64 on 32bit must
203 let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
204 this.memory.check_ptr_access(place.ptr, place.layout.size, align)?;
206 this.write_immediate(*old, dest)?; // old value is returned
207 let (op, neg) = match intrinsic_name.split('_').nth(1).unwrap() {
208 "or" => (mir::BinOp::BitOr, false),
209 "xor" => (mir::BinOp::BitXor, false),
210 "and" => (mir::BinOp::BitAnd, false),
211 "xadd" => (mir::BinOp::Add, false),
212 "xsub" => (mir::BinOp::Sub, false),
213 "nand" => (mir::BinOp::BitAnd, true),
216 // Atomics wrap around on overflow.
217 let val = this.binary_op(op, old, rhs)?;
218 let val = if neg { this.unary_op(mir::UnOp::Not, val)? } else { val };
219 this.write_immediate(*val, place.into())?;
222 "breakpoint" => unimplemented!(), // halt miri
226 | "copy_nonoverlapping"
228 let elem_ty = substs.type_at(0);
229 let elem_layout = this.layout_of(elem_ty)?;
230 let count = this.read_scalar(args[2])?.to_machine_usize(this)?;
231 let elem_align = elem_layout.align.abi;
233 let size = elem_layout.size.checked_mul(count, this)
234 .ok_or_else(|| err_ub_format!("overflow computing total size of `{}`", intrinsic_name))?;
235 let src = this.read_scalar(args[0])?.not_undef()?;
236 let src = this.memory.check_ptr_access(src, size, elem_align)?;
237 let dest = this.read_scalar(args[1])?.not_undef()?;
238 let dest = this.memory.check_ptr_access(dest, size, elem_align)?;
240 if let (Some(src), Some(dest)) = (src, dest) {
245 intrinsic_name.ends_with("_nonoverlapping"),
265 // FIXME: Using host floats.
266 let f = f32::from_bits(this.read_scalar(args[0])?.to_u32()?);
267 let f = match intrinsic_name {
269 "fabsf32" => f.abs(),
271 "sqrtf32" => f.sqrt(),
273 "exp2f32" => f.exp2(),
275 "log10f32" => f.log10(),
276 "log2f32" => f.log2(),
277 "floorf32" => f.floor(),
278 "ceilf32" => f.ceil(),
279 "truncf32" => f.trunc(),
280 "roundf32" => f.round(),
283 this.write_scalar(Scalar::from_u32(f.to_bits()), dest)?;
301 // FIXME: Using host floats.
302 let f = f64::from_bits(this.read_scalar(args[0])?.to_u64()?);
303 let f = match intrinsic_name {
305 "fabsf64" => f.abs(),
307 "sqrtf64" => f.sqrt(),
309 "exp2f64" => f.exp2(),
311 "log10f64" => f.log10(),
312 "log2f64" => f.log2(),
313 "floorf64" => f.floor(),
314 "ceilf64" => f.ceil(),
315 "truncf64" => f.trunc(),
316 "roundf64" => f.round(),
319 this.write_scalar(Scalar::from_u64(f.to_bits()), dest)?;
329 let a = this.read_immediate(args[0])?;
330 let b = this.read_immediate(args[1])?;
331 let op = match intrinsic_name {
332 "fadd_fast" => mir::BinOp::Add,
333 "fsub_fast" => mir::BinOp::Sub,
334 "fmul_fast" => mir::BinOp::Mul,
335 "fdiv_fast" => mir::BinOp::Div,
336 "frem_fast" => mir::BinOp::Rem,
339 this.binop_ignore_overflow(op, a, b, dest)?;
347 let a = this.read_scalar(args[0])?.to_f32()?;
348 let b = this.read_scalar(args[1])?.to_f32()?;
349 let res = match intrinsic_name {
350 "minnumf32" => a.min(b),
351 "maxnumf32" => a.max(b),
352 "copysignf32" => a.copy_sign(b),
355 this.write_scalar(Scalar::from_f32(res), dest)?;
363 let a = this.read_scalar(args[0])?.to_f64()?;
364 let b = this.read_scalar(args[1])?.to_f64()?;
365 let res = match intrinsic_name {
366 "minnumf64" => a.min(b),
367 "maxnumf64" => a.max(b),
368 "copysignf64" => a.copy_sign(b),
371 this.write_scalar(Scalar::from_f64(res), dest)?;
375 this.exact_div(this.read_immediate(args[0])?, this.read_immediate(args[1])?, dest)?,
383 // These just return their argument
384 let b = this.read_immediate(args[0])?;
385 this.write_immediate(*b, dest)?;
389 let ty = substs.type_at(0);
390 let layout = this.layout_of(ty)?;
391 let align = layout.align.pref.bytes();
392 let ptr_size = this.pointer_size();
393 let align_val = Scalar::from_uint(align, ptr_size);
394 this.write_scalar(align_val, dest)?;
398 let place = this.deref_operand(args[0])?;
399 this.copy_op(args[1], place.into())?;
403 let offset = this.read_scalar(args[1])?.to_machine_isize(this)?;
404 let ptr = this.read_scalar(args[0])?.not_undef()?;
405 let result_ptr = this.pointer_offset_inbounds(ptr, substs.type_at(0), offset)?;
406 this.write_scalar(result_ptr, dest)?;
410 "assert_zero_valid" |
411 "assert_uninit_valid" => {
412 let ty = substs.type_at(0);
413 let layout = this.layout_of(ty)?;
414 // Abort here because the caller might not be panic safe.
415 if layout.abi.is_uninhabited() {
416 throw_machine_stop!(TerminationInfo::Abort(Some(format!("attempted to instantiate uninhabited type `{}`", ty))))
418 if intrinsic_name == "assert_zero_valid" && !layout.might_permit_raw_init(this, /*zero:*/ true).unwrap() {
419 throw_machine_stop!(TerminationInfo::Abort(Some(format!("attempted to zero-initialize type `{}`, which is invalid", ty))))
421 if intrinsic_name == "assert_uninit_valid" && !layout.might_permit_raw_init(this, /*zero:*/ false).unwrap() {
422 throw_machine_stop!(TerminationInfo::Abort(Some(format!("attempted to leave type `{}` uninitialized, which is invalid", ty))))
427 // FIXME: Using host floats.
428 let f = f32::from_bits(this.read_scalar(args[0])?.to_u32()?);
429 let f2 = f32::from_bits(this.read_scalar(args[1])?.to_u32()?);
430 this.write_scalar(Scalar::from_u32(f.powf(f2).to_bits()), dest)?;
434 // FIXME: Using host floats.
435 let f = f64::from_bits(this.read_scalar(args[0])?.to_u64()?);
436 let f2 = f64::from_bits(this.read_scalar(args[1])?.to_u64()?);
437 this.write_scalar(Scalar::from_u64(f.powf(f2).to_bits()), dest)?;
441 let a = this.read_scalar(args[0])?.to_f32()?;
442 let b = this.read_scalar(args[1])?.to_f32()?;
443 let c = this.read_scalar(args[2])?.to_f32()?;
444 let res = a.mul_add(b, c).value;
445 this.write_scalar(Scalar::from_f32(res), dest)?;
449 let a = this.read_scalar(args[0])?.to_f64()?;
450 let b = this.read_scalar(args[1])?.to_f64()?;
451 let c = this.read_scalar(args[2])?.to_f64()?;
452 let res = a.mul_add(b, c).value;
453 this.write_scalar(Scalar::from_f64(res), dest)?;
457 // FIXME: Using host floats.
458 let f = f32::from_bits(this.read_scalar(args[0])?.to_u32()?);
459 let i = this.read_scalar(args[1])?.to_i32()?;
460 this.write_scalar(Scalar::from_u32(f.powi(i).to_bits()), dest)?;
464 // FIXME: Using host floats.
465 let f = f64::from_bits(this.read_scalar(args[0])?.to_u64()?);
466 let i = this.read_scalar(args[1])?.to_i32()?;
467 this.write_scalar(Scalar::from_u64(f.powi(i).to_bits()), dest)?;
471 let mplace = this.deref_operand(args[0])?;
473 .size_and_align_of_mplace(mplace)?
474 .expect("size_of_val called on extern type");
475 let ptr_size = this.pointer_size();
476 this.write_scalar(Scalar::from_uint(size.bytes(), ptr_size), dest)?;
483 let mplace = this.deref_operand(args[0])?;
484 let (_, align) = this
485 .size_and_align_of_mplace(mplace)?
486 .expect("size_of_val called on extern type");
487 let ptr_size = this.pointer_size();
488 this.write_scalar(Scalar::from_uint(align.bytes(), ptr_size), dest)?;
492 let ty = substs.type_at(0);
493 let ty_layout = this.layout_of(ty)?;
494 let val_byte = this.read_scalar(args[1])?.to_u8()?;
495 let ptr = this.read_scalar(args[0])?.not_undef()?;
496 let count = this.read_scalar(args[2])?.to_machine_usize(this)?;
497 let byte_count = ty_layout.size.checked_mul(count, this)
498 .ok_or_else(|| err_ub_format!("overflow computing total size of `write_bytes`"))?;
500 .write_bytes(ptr, iter::repeat(val_byte).take(byte_count.bytes() as usize))?;
503 name => throw_unsup_format!("unimplemented intrinsic: {}", name),
506 this.dump_place(*dest);
507 this.go_to_block(ret);