2 use std::convert::TryFrom;
4 use rustc_ast::ast::FloatTy;
5 use rustc_middle::{mir, ty};
6 use rustc_apfloat::{Float, Round};
7 use rustc_target::abi::{Align, LayoutOf, Size};
10 use helpers::check_arg_count;
// Blanket marker impl: attaches the `EvalContextExt` intrinsic-handling methods
// (defined on the trait below) to Miri's evaluation context. The impl body is
// intentionally empty — all methods have default implementations in the trait.
12 impl<'mir, 'tcx: 'mir> EvalContextExt<'mir, 'tcx> for crate::MiriEvalContext<'mir, 'tcx> {}
13 pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx> {
16 instance: ty::Instance<'tcx>,
17 args: &[OpTy<'tcx, Tag>],
18 ret: Option<(PlaceTy<'tcx, Tag>, mir::BasicBlock)>,
19 unwind: Option<mir::BasicBlock>,
20 ) -> InterpResult<'tcx> {
21 let this = self.eval_context_mut();
22 if this.emulate_intrinsic(instance, args, ret)? {
25 let substs = instance.substs;
27 // All these intrinsics take raw pointers, so if we access memory directly
28 // (as opposed to through a place), we have to remember to erase any tag
29 // that might still hang around!
30 let intrinsic_name = &*this.tcx.item_name(instance.def_id()).as_str();
32 // First handle intrinsics without return place.
33 let (dest, ret) = match ret {
34 None => match intrinsic_name {
35 "miri_start_panic" => return this.handle_miri_start_panic(args, unwind),
36 "unreachable" => throw_ub!(Unreachable),
37 _ => throw_unsup_format!("unimplemented (diverging) intrinsic: {}", intrinsic_name),
42 // Then handle terminating intrinsics.
43 match intrinsic_name {
44 // Raw memory accesses
47 | "copy_nonoverlapping"
49 let &[src, dest, count] = check_arg_count(args)?;
50 let elem_ty = substs.type_at(0);
51 let elem_layout = this.layout_of(elem_ty)?;
52 let count = this.read_scalar(count)?.to_machine_usize(this)?;
53 let elem_align = elem_layout.align.abi;
55 let size = elem_layout.size.checked_mul(count, this)
56 .ok_or_else(|| err_ub_format!("overflow computing total size of `{}`", intrinsic_name))?;
57 let src = this.read_scalar(src)?.not_undef()?;
58 let src = this.memory.check_ptr_access(src, size, elem_align)?;
59 let dest = this.read_scalar(dest)?.not_undef()?;
60 let dest = this.memory.check_ptr_access(dest, size, elem_align)?;
62 if let (Some(src), Some(dest)) = (src, dest) {
67 intrinsic_name.ends_with("_nonoverlapping"),
73 let &[place, dest] = check_arg_count(args)?;
74 let place = this.deref_operand(place)?;
75 this.copy_op(dest, place.into())?;
79 let &[place] = check_arg_count(args)?;
80 let place = this.deref_operand(place)?;
81 this.copy_op(place.into(), dest)?;
84 let &[place, dest] = check_arg_count(args)?;
85 let place = this.deref_operand(place)?;
86 this.copy_op(dest, place.into())?;
90 let &[ptr, val_byte, count] = check_arg_count(args)?;
91 let ty = substs.type_at(0);
92 let ty_layout = this.layout_of(ty)?;
93 let val_byte = this.read_scalar(val_byte)?.to_u8()?;
94 let ptr = this.read_scalar(ptr)?.not_undef()?;
95 let count = this.read_scalar(count)?.to_machine_usize(this)?;
96 let byte_count = ty_layout.size.checked_mul(count, this)
97 .ok_or_else(|| err_ub_format!("overflow computing total size of `write_bytes`"))?;
99 .write_bytes(ptr, iter::repeat(val_byte).take(byte_count.bytes() as usize))?;
102 // Pointer arithmetic
104 let &[ptr, offset] = check_arg_count(args)?;
105 let ptr = this.read_scalar(ptr)?.not_undef()?;
106 let offset = this.read_scalar(offset)?.to_machine_isize(this)?;
108 let pointee_ty = substs.type_at(0);
109 let pointee_size = i64::try_from(this.layout_of(pointee_ty)?.size.bytes()).unwrap();
110 let offset = offset.overflowing_mul(pointee_size).0;
111 let result_ptr = ptr.ptr_wrapping_signed_offset(offset, this);
112 this.write_scalar(result_ptr, dest)?;
115 let &[ptr, offset] = check_arg_count(args)?;
116 let ptr = this.read_scalar(ptr)?.not_undef()?;
117 let offset = this.read_scalar(offset)?.to_machine_isize(this)?;
118 let result_ptr = this.pointer_offset_inbounds(ptr, substs.type_at(0), offset)?;
119 this.write_scalar(result_ptr, dest)?;
122 // Floating-point operations
138 let &[f] = check_arg_count(args)?;
139 // FIXME: Using host floats.
140 let f = f32::from_bits(this.read_scalar(f)?.to_u32()?);
141 let f = match intrinsic_name {
143 "fabsf32" => f.abs(),
145 "sqrtf32" => f.sqrt(),
147 "exp2f32" => f.exp2(),
149 "log10f32" => f.log10(),
150 "log2f32" => f.log2(),
151 "floorf32" => f.floor(),
152 "ceilf32" => f.ceil(),
153 "truncf32" => f.trunc(),
154 "roundf32" => f.round(),
157 this.write_scalar(Scalar::from_u32(f.to_bits()), dest)?;
175 let &[f] = check_arg_count(args)?;
176 // FIXME: Using host floats.
177 let f = f64::from_bits(this.read_scalar(f)?.to_u64()?);
178 let f = match intrinsic_name {
180 "fabsf64" => f.abs(),
182 "sqrtf64" => f.sqrt(),
184 "exp2f64" => f.exp2(),
186 "log10f64" => f.log10(),
187 "log2f64" => f.log2(),
188 "floorf64" => f.floor(),
189 "ceilf64" => f.ceil(),
190 "truncf64" => f.trunc(),
191 "roundf64" => f.round(),
194 this.write_scalar(Scalar::from_u64(f.to_bits()), dest)?;
204 let &[a, b] = check_arg_count(args)?;
205 let a = this.read_immediate(a)?;
206 let b = this.read_immediate(b)?;
207 let op = match intrinsic_name {
208 "fadd_fast" => mir::BinOp::Add,
209 "fsub_fast" => mir::BinOp::Sub,
210 "fmul_fast" => mir::BinOp::Mul,
211 "fdiv_fast" => mir::BinOp::Div,
212 "frem_fast" => mir::BinOp::Rem,
215 this.binop_ignore_overflow(op, a, b, dest)?;
223 let &[a, b] = check_arg_count(args)?;
224 let a = this.read_scalar(a)?.to_f32()?;
225 let b = this.read_scalar(b)?.to_f32()?;
226 let res = match intrinsic_name {
227 "minnumf32" => a.min(b),
228 "maxnumf32" => a.max(b),
229 "copysignf32" => a.copy_sign(b),
232 this.write_scalar(Scalar::from_f32(res), dest)?;
240 let &[a, b] = check_arg_count(args)?;
241 let a = this.read_scalar(a)?.to_f64()?;
242 let b = this.read_scalar(b)?.to_f64()?;
243 let res = match intrinsic_name {
244 "minnumf64" => a.min(b),
245 "maxnumf64" => a.max(b),
246 "copysignf64" => a.copy_sign(b),
249 this.write_scalar(Scalar::from_f64(res), dest)?;
253 let &[f, f2] = check_arg_count(args)?;
254 // FIXME: Using host floats.
255 let f = f32::from_bits(this.read_scalar(f)?.to_u32()?);
256 let f2 = f32::from_bits(this.read_scalar(f2)?.to_u32()?);
257 this.write_scalar(Scalar::from_u32(f.powf(f2).to_bits()), dest)?;
261 let &[f, f2] = check_arg_count(args)?;
262 // FIXME: Using host floats.
263 let f = f64::from_bits(this.read_scalar(f)?.to_u64()?);
264 let f2 = f64::from_bits(this.read_scalar(f2)?.to_u64()?);
265 this.write_scalar(Scalar::from_u64(f.powf(f2).to_bits()), dest)?;
269 let &[a, b, c] = check_arg_count(args)?;
270 let a = this.read_scalar(a)?.to_f32()?;
271 let b = this.read_scalar(b)?.to_f32()?;
272 let c = this.read_scalar(c)?.to_f32()?;
273 let res = a.mul_add(b, c).value;
274 this.write_scalar(Scalar::from_f32(res), dest)?;
278 let &[a, b, c] = check_arg_count(args)?;
279 let a = this.read_scalar(a)?.to_f64()?;
280 let b = this.read_scalar(b)?.to_f64()?;
281 let c = this.read_scalar(c)?.to_f64()?;
282 let res = a.mul_add(b, c).value;
283 this.write_scalar(Scalar::from_f64(res), dest)?;
287 let &[f, i] = check_arg_count(args)?;
288 // FIXME: Using host floats.
289 let f = f32::from_bits(this.read_scalar(f)?.to_u32()?);
290 let i = this.read_scalar(i)?.to_i32()?;
291 this.write_scalar(Scalar::from_u32(f.powi(i).to_bits()), dest)?;
295 let &[f, i] = check_arg_count(args)?;
296 // FIXME: Using host floats.
297 let f = f64::from_bits(this.read_scalar(f)?.to_u64()?);
298 let i = this.read_scalar(i)?.to_i32()?;
299 this.write_scalar(Scalar::from_u64(f.powi(i).to_bits()), dest)?;
302 "float_to_int_unchecked" => {
303 let &[val] = check_arg_count(args)?;
304 let val = this.read_immediate(val)?;
306 let res = match val.layout.ty.kind {
307 ty::Float(FloatTy::F32) => {
308 this.float_to_int_unchecked(val.to_scalar()?.to_f32()?, dest.layout.ty)?
310 ty::Float(FloatTy::F64) => {
311 this.float_to_int_unchecked(val.to_scalar()?.to_f64()?, dest.layout.ty)?
313 _ => bug!("`float_to_int_unchecked` called with non-float input type {:?}", val.layout.ty),
316 this.write_scalar(res, dest)?;
322 | "atomic_load_relaxed"
325 let &[place] = check_arg_count(args)?;
326 let place = this.deref_operand(place)?;
327 let val = this.read_scalar(place.into())?; // make sure it fits into a scalar; otherwise it cannot be atomic
329 // Check alignment requirements. Atomics must always be aligned to their size,
330 // even if the type they wrap would be less aligned (e.g. AtomicU64 on 32bit must
332 let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
333 this.memory.check_ptr_access(place.ptr, place.layout.size, align)?;
335 this.write_scalar(val, dest)?;
340 | "atomic_store_relaxed"
343 let &[place, val] = check_arg_count(args)?;
344 let place = this.deref_operand(place)?;
345 let val = this.read_scalar(val)?; // make sure it fits into a scalar; otherwise it cannot be atomic
347 // Check alignment requirements. Atomics must always be aligned to their size,
348 // even if the type they wrap would be less aligned (e.g. AtomicU64 on 32bit must
350 let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
351 this.memory.check_ptr_access(place.ptr, place.layout.size, align)?;
353 this.write_scalar(val, place.into())?;
359 | "atomic_fence_acqrel"
361 | "atomic_singlethreadfence_acq"
362 | "atomic_singlethreadfence_rel"
363 | "atomic_singlethreadfence_acqrel"
364 | "atomic_singlethreadfence"
366 let &[] = check_arg_count(args)?;
367 // FIXME: this will become relevant once we try to detect data races.
370 _ if intrinsic_name.starts_with("atomic_xchg") => {
371 let &[place, new] = check_arg_count(args)?;
372 let place = this.deref_operand(place)?;
373 let new = this.read_scalar(new)?;
374 let old = this.read_scalar(place.into())?;
376 // Check alignment requirements. Atomics must always be aligned to their size,
377 // even if the type they wrap would be less aligned (e.g. AtomicU64 on 32bit must
379 let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
380 this.memory.check_ptr_access(place.ptr, place.layout.size, align)?;
382 this.write_scalar(old, dest)?; // old value is returned
383 this.write_scalar(new, place.into())?;
386 _ if intrinsic_name.starts_with("atomic_cxchg") => {
387 let &[place, expect_old, new] = check_arg_count(args)?;
388 let place = this.deref_operand(place)?;
389 let expect_old = this.read_immediate(expect_old)?; // read as immediate for the sake of `binary_op()`
390 let new = this.read_scalar(new)?;
391 let old = this.read_immediate(place.into())?; // read as immediate for the sake of `binary_op()`
393 // Check alignment requirements. Atomics must always be aligned to their size,
394 // even if the type they wrap would be less aligned (e.g. AtomicU64 on 32bit must
396 let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
397 this.memory.check_ptr_access(place.ptr, place.layout.size, align)?;
399 // `binary_op` will bail if either of them is not a scalar.
400 let eq = this.overflowing_binary_op(mir::BinOp::Eq, old, expect_old)?.0;
401 let res = Immediate::ScalarPair(old.to_scalar_or_undef(), eq.into());
403 this.write_immediate(res, dest)?;
404 // Update ptr depending on comparison.
406 this.write_scalar(new, place.into())?;
415 | "atomic_or_relaxed"
419 | "atomic_xor_acqrel"
420 | "atomic_xor_relaxed"
424 | "atomic_and_acqrel"
425 | "atomic_and_relaxed"
429 | "atomic_nand_acqrel"
430 | "atomic_nand_relaxed"
434 | "atomic_xadd_acqrel"
435 | "atomic_xadd_relaxed"
439 | "atomic_xsub_acqrel"
440 | "atomic_xsub_relaxed"
442 let &[place, rhs] = check_arg_count(args)?;
443 let place = this.deref_operand(place)?;
444 if !place.layout.ty.is_integral() {
445 bug!("Atomic arithmetic operations only work on integer types");
447 let rhs = this.read_immediate(rhs)?;
448 let old = this.read_immediate(place.into())?;
450 // Check alignment requirements. Atomics must always be aligned to their size,
451 // even if the type they wrap would be less aligned (e.g. AtomicU64 on 32bit must
453 let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
454 this.memory.check_ptr_access(place.ptr, place.layout.size, align)?;
456 this.write_immediate(*old, dest)?; // old value is returned
457 let (op, neg) = match intrinsic_name.split('_').nth(1).unwrap() {
458 "or" => (mir::BinOp::BitOr, false),
459 "xor" => (mir::BinOp::BitXor, false),
460 "and" => (mir::BinOp::BitAnd, false),
461 "xadd" => (mir::BinOp::Add, false),
462 "xsub" => (mir::BinOp::Sub, false),
463 "nand" => (mir::BinOp::BitAnd, true),
466 // Atomics wrap around on overflow.
467 let val = this.binary_op(op, old, rhs)?;
468 let val = if neg { this.unary_op(mir::UnOp::Not, val)? } else { val };
469 this.write_immediate(*val, place.into())?;
472 // Query type information
474 "assert_zero_valid" |
475 "assert_uninit_valid" => {
476 let &[] = check_arg_count(args)?;
477 let ty = substs.type_at(0);
478 let layout = this.layout_of(ty)?;
479 // Abort here because the caller might not be panic safe.
480 if layout.abi.is_uninhabited() {
481 throw_machine_stop!(TerminationInfo::Abort(Some(format!("attempted to instantiate uninhabited type `{}`", ty))))
483 if intrinsic_name == "assert_zero_valid" && !layout.might_permit_raw_init(this, /*zero:*/ true).unwrap() {
484 throw_machine_stop!(TerminationInfo::Abort(Some(format!("attempted to zero-initialize type `{}`, which is invalid", ty))))
486 if intrinsic_name == "assert_uninit_valid" && !layout.might_permit_raw_init(this, /*zero:*/ false).unwrap() {
487 throw_machine_stop!(TerminationInfo::Abort(Some(format!("attempted to leave type `{}` uninitialized, which is invalid", ty))))
491 "min_align_of_val" => {
492 let &[mplace] = check_arg_count(args)?;
493 let mplace = this.deref_operand(mplace)?;
494 let (_, align) = this
495 .size_and_align_of_mplace(mplace)?
496 .expect("size_of_val called on extern type");
497 this.write_scalar(Scalar::from_machine_usize(align.bytes(), this), dest)?;
501 let &[mplace] = check_arg_count(args)?;
502 let mplace = this.deref_operand(mplace)?;
504 .size_and_align_of_mplace(mplace)?
505 .expect("size_of_val called on extern type");
506 this.write_scalar(Scalar::from_machine_usize(size.bytes(), this), dest)?;
511 let &[cond] = check_arg_count(args)?;
512 let cond = this.read_scalar(cond)?.not_undef()?.to_bool()?;
514 throw_ub_format!("`assume` intrinsic called with `false`");
519 let &[num, denom] = check_arg_count(args)?;
520 this.exact_div(this.read_immediate(num)?, this.read_immediate(denom)?, dest)?;
524 // We get an argument... and forget about it.
525 let &[_] = check_arg_count(args)?;
532 // These just return their argument
533 let &[b] = check_arg_count(args)?;
534 let b = this.read_immediate(b)?;
535 this.write_immediate(*b, dest)?;
538 "try" => return this.handle_try(args, dest, ret),
540 name => throw_unsup_format!("unimplemented intrinsic: {}", name),
543 this.dump_place(*dest);
544 this.go_to_block(ret);
548 fn float_to_int_unchecked<F>(
551 dest_ty: ty::Ty<'tcx>,
552 ) -> InterpResult<'tcx, Scalar<Tag>>
554 F: Float + Into<Scalar<Tag>>
556 let this = self.eval_context_ref();
558 // Step 1: cut off the fractional part of `f`. The result of this is
559 // guaranteed to be precisely representable in IEEE floats.
560 let f = f.round_to_integral(Round::TowardZero).value;
562 // Step 2: Cast the truncated float to the target integer type and see if we lose any information in this step.
563 Ok(match dest_ty.kind {
566 let width = t.bit_width().unwrap_or_else(|| this.pointer_size().bits());
567 let res = f.to_u128(usize::try_from(width).unwrap());
568 if res.status.is_empty() {
569 // No status flags means there was no further rounding or other loss of precision.
570 Scalar::from_uint(res.value, Size::from_bits(width))
572 // `f` was not representable in this integer type.
574 "`float_to_int_unchecked` intrinsic called on {} which cannot be represented in target type `{:?}`",
581 let width = t.bit_width().unwrap_or_else(|| this.pointer_size().bits());
582 let res = f.to_i128(usize::try_from(width).unwrap());
583 if res.status.is_empty() {
584 // No status flags means there was no further rounding or other loss of precision.
585 Scalar::from_int(res.value, Size::from_bits(width))
587 // `f` was not representable in this integer type.
589 "`float_to_int_unchecked` intrinsic called on {} which cannot be represented in target type `{:?}`",
595 _ => bug!("`float_to_int_unchecked` called with non-int output type {:?}", dest_ty),