use rustc::mir;
use rustc::ty::layout::{self, LayoutOf, Size, Primitive, Integer::*};
use rustc::ty;

use rustc::mir::interpret::{EvalResult, Scalar, ScalarMaybeUndef};
use rustc_mir::interpret::{
    PlaceExtra, PlaceTy, EvalContext, OpTy, Value,
};

use super::{ScalarExt, FalibleScalarExt, OperatorEvalContextExt};

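/// Extends Miri's `EvalContext` with support for emulating compiler intrinsics.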
pub trait EvalContextExt<'tcx> {
    fn call_intrinsic(
        &mut self,
        instance: ty::Instance<'tcx>,
        args: &[OpTy<'tcx>],
        dest: PlaceTy<'tcx>,
        target: mir::BasicBlock,
    ) -> EvalResult<'tcx>;
}

impl<'a, 'mir, 'tcx> EvalContextExt<'tcx> for EvalContext<'a, 'mir, 'tcx, super::Evaluator<'tcx>> {
    fn call_intrinsic(
        &mut self,
        instance: ty::Instance<'tcx>,
        args: &[OpTy<'tcx>],
        dest: PlaceTy<'tcx>,
        target: mir::BasicBlock,
    ) -> EvalResult<'tcx> {
        let substs = instance.substs;

        let intrinsic_name = &self.tcx.item_name(instance.def_id()).as_str()[..];
        match intrinsic_name {
34 "add_with_overflow" => {
35 let l = self.read_value(args[0])?;
36 let r = self.read_value(args[1])?;
37 self.binop_with_overflow(
45 "sub_with_overflow" => {
46 let l = self.read_value(args[0])?;
47 let r = self.read_value(args[1])?;
48 self.binop_with_overflow(
56 "mul_with_overflow" => {
57 let l = self.read_value(args[0])?;
58 let r = self.read_value(args[1])?;
59 self.binop_with_overflow(
            "arith_offset" => {
                let offset = self.read_scalar(args[1])?.to_isize(&self)?;
                let ptr = self.read_scalar(args[0])?.not_undef()?;

                let pointee_ty = substs.type_at(0);
                let pointee_size = self.layout_of(pointee_ty)?.size.bytes() as i64;
                let offset = offset.overflowing_mul(pointee_size).0;
                let result_ptr = ptr.ptr_wrapping_signed_offset(offset, &self);
                self.write_scalar(result_ptr, dest)?;
            }

            "assume" => {
                let cond = self.read_scalar(args[0])?.to_bool()?;
                if !cond {
                    return err!(AssumptionNotHeld);
                }
            }

86 "atomic_load_relaxed" |
89 let ptr = self.ref_to_mplace(self.read_value(args[0])?)?;
90 let val = self.read_scalar(ptr.into())?; // make sure it fits into a scalar; otherwise it cannot be atomic
91 self.write_scalar(val, dest)?;
95 "atomic_store_relaxed" |
98 let ptr = self.ref_to_mplace(self.read_value(args[0])?)?;
99 let val = self.read_scalar(args[1])?; // make sure it fits into a scalar; otherwise it cannot be atomic
100 self.write_scalar(val, ptr.into())?;
103 "atomic_fence_acq" => {
104 // we are inherently singlethreaded and singlecored, this is a nop
107 _ if intrinsic_name.starts_with("atomic_xchg") => {
108 let ptr = self.ref_to_mplace(self.read_value(args[0])?)?;
109 let new = self.read_scalar(args[1])?;
110 let old = self.read_scalar(ptr.into())?;
111 self.write_scalar(old, dest)?; // old value is returned
112 self.write_scalar(new, ptr.into())?;
115 _ if intrinsic_name.starts_with("atomic_cxchg") => {
116 let ptr = self.ref_to_mplace(self.read_value(args[0])?)?;
117 let expect_old = self.read_value(args[1])?; // read as value for the sake of `binary_op()`
118 let new = self.read_scalar(args[2])?;
119 let old = self.read_value(ptr.into())?; // read as value for the sake of `binary_op()`
120 // binary_op will bail if either of them is not a scalar
121 let (eq, _) = self.binary_op(mir::BinOp::Eq, old, expect_old)?;
122 let res = Value::ScalarPair(old.to_scalar_or_undef(), eq.into());
123 self.write_value(res, dest)?; // old value is returned
124 // update ptr depending on comparison
126 self.write_scalar(new, ptr.into())?;
134 "atomic_or_relaxed" |
138 "atomic_xor_acqrel" |
139 "atomic_xor_relaxed" |
143 "atomic_and_acqrel" |
144 "atomic_and_relaxed" |
148 "atomic_xadd_acqrel" |
149 "atomic_xadd_relaxed" |
153 "atomic_xsub_acqrel" |
154 "atomic_xsub_relaxed" => {
155 let ptr = self.ref_to_mplace(self.read_value(args[0])?)?;
156 let rhs = self.read_value(args[1])?;
157 let old = self.read_value(ptr.into())?;
158 self.write_value(*old, dest)?; // old value is returned
159 let op = match intrinsic_name.split('_').nth(1).unwrap() {
160 "or" => mir::BinOp::BitOr,
161 "xor" => mir::BinOp::BitXor,
162 "and" => mir::BinOp::BitAnd,
163 "xadd" => mir::BinOp::Add,
164 "xsub" => mir::BinOp::Sub,
167 // FIXME: what do atomics do on overflow?
168 let (val, _) = self.binary_op(op, old, rhs)?;
169 self.write_scalar(val, ptr.into())?;
172 "breakpoint" => unimplemented!(), // halt miri
175 "copy_nonoverlapping" => {
176 let elem_ty = substs.type_at(0);
177 let elem_layout = self.layout_of(elem_ty)?;
178 let elem_size = elem_layout.size.bytes();
179 let count = self.read_scalar(args[2])?.to_usize(&self)?;
180 if count * elem_size != 0 {
181 // TODO: We do not even validate alignment for the 0-bytes case. libstd relies on this in vec::IntoIter::next.
182 // Also see the write_bytes intrinsic.
183 let elem_align = elem_layout.align;
184 let src = self.read_scalar(args[0])?.not_undef()?;
185 let dest = self.read_scalar(args[1])?.not_undef()?;
191 Size::from_bytes(count * elem_size),
192 intrinsic_name.ends_with("_nonoverlapping"),
197 "ctpop" | "cttz" | "cttz_nonzero" | "ctlz" | "ctlz_nonzero" | "bswap" => {
198 let ty = substs.type_at(0);
199 let num = self.read_scalar(args[0])?.to_bytes()?;
200 let kind = match self.layout_of(ty)?.abi {
201 ty::layout::Abi::Scalar(ref scalar) => scalar.value,
202 _ => Err(::rustc::mir::interpret::EvalErrorKind::TypeNotPrimitive(ty))?,
204 let num = if intrinsic_name.ends_with("_nonzero") {
206 return err!(Intrinsic(format!("{} called on 0", intrinsic_name)));
208 numeric_intrinsic(intrinsic_name.trim_right_matches("_nonzero"), num, kind)?
210 numeric_intrinsic(intrinsic_name, num, kind)?
212 self.write_scalar(num, dest)?;
215 "discriminant_value" => {
216 let place = self.ref_to_mplace(self.read_value(args[0])?)?;
217 let discr_val = self.read_discriminant_value(place.into())?;
218 self.write_scalar(Scalar::from_uint(discr_val, dest.layout.size), dest)?;
221 "sinf32" | "fabsf32" | "cosf32" | "sqrtf32" | "expf32" | "exp2f32" | "logf32" |
222 "log10f32" | "log2f32" | "floorf32" | "ceilf32" | "truncf32" => {
223 let f = self.read_scalar(args[0])?.to_bytes()?;
224 let f = f32::from_bits(f as u32);
225 let f = match intrinsic_name {
227 "fabsf32" => f.abs(),
229 "sqrtf32" => f.sqrt(),
231 "exp2f32" => f.exp2(),
233 "log10f32" => f.log10(),
234 "log2f32" => f.log2(),
235 "floorf32" => f.floor(),
236 "ceilf32" => f.ceil(),
237 "truncf32" => f.trunc(),
240 self.write_scalar(Scalar::from_f32(f), dest)?;
243 "sinf64" | "fabsf64" | "cosf64" | "sqrtf64" | "expf64" | "exp2f64" | "logf64" |
244 "log10f64" | "log2f64" | "floorf64" | "ceilf64" | "truncf64" => {
245 let f = self.read_scalar(args[0])?.to_bytes()?;
246 let f = f64::from_bits(f as u64);
247 let f = match intrinsic_name {
249 "fabsf64" => f.abs(),
251 "sqrtf64" => f.sqrt(),
253 "exp2f64" => f.exp2(),
255 "log10f64" => f.log10(),
256 "log2f64" => f.log2(),
257 "floorf64" => f.floor(),
258 "ceilf64" => f.ceil(),
259 "truncf64" => f.trunc(),
262 self.write_scalar(Scalar::from_f64(f), dest)?;
265 "fadd_fast" | "fsub_fast" | "fmul_fast" | "fdiv_fast" | "frem_fast" => {
266 let a = self.read_value(args[0])?;
267 let b = self.read_value(args[1])?;
268 let op = match intrinsic_name {
269 "fadd_fast" => mir::BinOp::Add,
270 "fsub_fast" => mir::BinOp::Sub,
271 "fmul_fast" => mir::BinOp::Mul,
272 "fdiv_fast" => mir::BinOp::Div,
273 "frem_fast" => mir::BinOp::Rem,
276 let result = self.binary_op(op, a, b)?;
277 self.write_scalar(result.0, dest)?;
            "exact_div" => {
                // Performs an exact division, resulting in undefined behavior when
                // `x % y != 0`, `y == 0`, or `x == T::min_value() && y == -1`
                let a = self.read_value(args[0])?;
                let b = self.read_value(args[1])?;
                // check x % y != 0
                if !self.binary_op(mir::BinOp::Rem, a, b)?.0.is_null() {
                    return err!(ValidationFailure(format!("exact_div: {:?} cannot be divided by {:?}", a, b)));
                }
                let result = self.binary_op(mir::BinOp::Div, a, b)?;
                self.write_scalar(result.0, dest)?;
            }

293 "likely" | "unlikely" | "forget" => {}
            "init" => {
                // Check fast path: we don't want to force an allocation in case the destination is a simple value,
                // but we also do not want to create a new allocation with 0s and then copy that over.
                match dest.layout.abi {
                    layout::Abi::Scalar(ref s) => {
                        let x = Scalar::null(s.value.size(&self));
                        self.write_value(Value::Scalar(x.into()), dest)?;
                    }
                    layout::Abi::ScalarPair(ref s1, ref s2) => {
                        let x = Scalar::null(s1.value.size(&self));
                        let y = Scalar::null(s2.value.size(&self));
                        self.write_value(Value::ScalarPair(x.into(), y.into()), dest)?;
                    }
                    _ => {
                        // Do it in memory
                        let mplace = self.force_allocation(dest)?;
                        assert_eq!(mplace.extra, PlaceExtra::None);
                        self.memory.write_repeat(mplace.ptr, 0, dest.layout.size)?;
                    }
                }
            }

            "min_align_of" => {
                let elem_ty = substs.type_at(0);
                let elem_align = self.layout_of(elem_ty)?.align.abi();
                let ptr_size = self.memory.pointer_size();
                let align_val = Scalar::from_uint(elem_align as u128, ptr_size);
                self.write_scalar(align_val, dest)?;
            }

            "pref_align_of" => {
                let ty = substs.type_at(0);
                let layout = self.layout_of(ty)?;
                let align = layout.align.pref();
                let ptr_size = self.memory.pointer_size();
                let align_val = Scalar::from_uint(align as u128, ptr_size);
                self.write_scalar(align_val, dest)?;
            }

            "move_val_init" => {
                let ptr = self.ref_to_mplace(self.read_value(args[0])?)?;
                self.copy_op(args[1], ptr.into())?;
            }

            "needs_drop" => {
                let ty = substs.type_at(0);
                let env = ty::ParamEnv::reveal_all();
                let needs_drop = ty.needs_drop(self.tcx.tcx, env);
                self.write_scalar(
                    Scalar::from_bool(needs_drop),
                    dest,
                )?;
            }

            "offset" => {
                let offset = self.read_scalar(args[1])?.to_isize(&self)?;
                let ptr = self.read_scalar(args[0])?.not_undef()?;
                let result_ptr = self.pointer_offset_inbounds(ptr, substs.type_at(0), offset)?;
                self.write_scalar(result_ptr, dest)?;
            }

356 "overflowing_sub" => {
357 let l = self.read_value(args[0])?;
358 let r = self.read_value(args[1])?;
359 self.binop_ignore_overflow(
367 "overflowing_mul" => {
368 let l = self.read_value(args[0])?;
369 let r = self.read_value(args[1])?;
370 self.binop_ignore_overflow(
378 "overflowing_add" => {
379 let l = self.read_value(args[0])?;
380 let r = self.read_value(args[1])?;
381 self.binop_ignore_overflow(
            "powf32" => {
                let f = self.read_scalar(args[0])?.to_bits(Size::from_bits(32))?;
                let f = f32::from_bits(f as u32);
                let f2 = self.read_scalar(args[1])?.to_bits(Size::from_bits(32))?;
                let f2 = f32::from_bits(f2 as u32);
                self.write_scalar(
                    Scalar::from_f32(f.powf(f2)),
                    dest,
                )?;
            }

            "powf64" => {
                let f = self.read_scalar(args[0])?.to_bits(Size::from_bits(64))?;
                let f = f64::from_bits(f as u64);
                let f2 = self.read_scalar(args[1])?.to_bits(Size::from_bits(64))?;
                let f2 = f64::from_bits(f2 as u64);
                self.write_scalar(
                    Scalar::from_f64(f.powf(f2)),
                    dest,
                )?;
            }

            "fmaf32" => {
                let a = self.read_scalar(args[0])?.to_bits(Size::from_bits(32))?;
                let a = f32::from_bits(a as u32);
                let b = self.read_scalar(args[1])?.to_bits(Size::from_bits(32))?;
                let b = f32::from_bits(b as u32);
                let c = self.read_scalar(args[2])?.to_bits(Size::from_bits(32))?;
                let c = f32::from_bits(c as u32);
                self.write_scalar(
                    Scalar::from_f32(a * b + c),
                    dest,
                )?;
            }

            "fmaf64" => {
                let a = self.read_scalar(args[0])?.to_bits(Size::from_bits(64))?;
                let a = f64::from_bits(a as u64);
                let b = self.read_scalar(args[1])?.to_bits(Size::from_bits(64))?;
                let b = f64::from_bits(b as u64);
                let c = self.read_scalar(args[2])?.to_bits(Size::from_bits(64))?;
                let c = f64::from_bits(c as u64);
                self.write_scalar(
                    Scalar::from_f64(a * b + c),
                    dest,
                )?;
            }

            "powif32" => {
                let f = self.read_scalar(args[0])?.to_bits(Size::from_bits(32))?;
                let f = f32::from_bits(f as u32);
                let i = self.read_scalar(args[1])?.to_i32()?;
                self.write_scalar(
                    Scalar::from_f32(f.powi(i)),
                    dest,
                )?;
            }

            "powif64" => {
                let f = self.read_scalar(args[0])?.to_bits(Size::from_bits(64))?;
                let f = f64::from_bits(f as u64);
                let i = self.read_scalar(args[1])?.to_i32()?;
                self.write_scalar(
                    Scalar::from_f64(f.powi(i)),
                    dest,
                )?;
            }

            "size_of" => {
                let ty = substs.type_at(0);
                let size = self.layout_of(ty)?.size.bytes();
                let ptr_size = self.memory.pointer_size();
                self.write_scalar(Scalar::from_uint(size, ptr_size), dest)?;
            }

            "size_of_val" => {
                let mplace = self.ref_to_mplace(self.read_value(args[0])?)?;
                let (size, _) = self.size_and_align_of_mplace(mplace)?;
                let ptr_size = self.memory.pointer_size();
                self.write_scalar(
                    Scalar::from_uint(size.bytes() as u128, ptr_size),
                    dest,
                )?;
            }

            "min_align_of_val" |
            "align_of_val" => {
                let mplace = self.ref_to_mplace(self.read_value(args[0])?)?;
                let (_, align) = self.size_and_align_of_mplace(mplace)?;
                let ptr_size = self.memory.pointer_size();
                self.write_scalar(
                    Scalar::from_uint(align.abi(), ptr_size),
                    dest,
                )?;
            }

            "type_name" => {
                let ty = substs.type_at(0);
                let ty_name = ty.to_string();
                let value = self.str_to_value(&ty_name)?;
                self.write_value(value, dest)?;
            }

            "type_id" => {
                let ty = substs.type_at(0);
                let n = self.tcx.type_id_hash(ty);
                self.write_scalar(Scalar::Bits { bits: n as u128, size: 8 }, dest)?;
            }

            "transmute" => {
                // Go through an allocation, to make sure the completely different layouts
                // do not pose a problem. (When the user transmutes through a union,
                // there will not be a layout mismatch.)
                let dest = self.force_allocation(dest)?;
                self.copy_op(args[0], dest.into())?;
            }

            "unchecked_shl" => {
                let bits = dest.layout.size.bytes() as u128 * 8;
                let l = self.read_value(args[0])?;
                let r = self.read_value(args[1])?;
                let rval = r.to_scalar()?.to_bytes()?;
                if rval >= bits {
                    return err!(Intrinsic(
                        format!("Overflowing shift by {} in unchecked_shl", rval),
                    ));
                }
                self.binop_ignore_overflow(mir::BinOp::Shl, l, r, dest)?;
            }

            "unchecked_shr" => {
                let bits = dest.layout.size.bytes() as u128 * 8;
                let l = self.read_value(args[0])?;
                let r = self.read_value(args[1])?;
                let rval = r.to_scalar()?.to_bytes()?;
                if rval >= bits {
                    return err!(Intrinsic(
                        format!("Overflowing shift by {} in unchecked_shr", rval),
                    ));
                }
                self.binop_ignore_overflow(mir::BinOp::Shr, l, r, dest)?;
            }

            "unchecked_div" => {
                let l = self.read_value(args[0])?;
                let r = self.read_value(args[1])?;
                let rval = r.to_scalar()?.to_bytes()?;
                if rval == 0 {
                    return err!(Intrinsic(format!("Division by 0 in unchecked_div")));
                }
                self.binop_ignore_overflow(mir::BinOp::Div, l, r, dest)?;
            }

            "unchecked_rem" => {
                let l = self.read_value(args[0])?;
                let r = self.read_value(args[1])?;
                let rval = r.to_scalar()?.to_bytes()?;
                if rval == 0 {
                    return err!(Intrinsic(format!("Division by 0 in unchecked_rem")));
                }
                self.binop_ignore_overflow(mir::BinOp::Rem, l, r, dest)?;
            }

            "uninit" => {
                // Check fast path: we don't want to force an allocation in case the destination is a simple value,
                // but we also do not want to create a new allocation with 0s and then copy that over.
                match dest.layout.abi {
                    layout::Abi::Scalar(..) => {
                        let x = ScalarMaybeUndef::Undef;
                        self.write_value(Value::Scalar(x), dest)?;
                    }
                    layout::Abi::ScalarPair(..) => {
                        let x = ScalarMaybeUndef::Undef;
                        self.write_value(Value::ScalarPair(x, x), dest)?;
                    }
                    _ => {
                        // Do it in memory
                        let mplace = self.force_allocation(dest)?;
                        assert_eq!(mplace.extra, PlaceExtra::None);
                        self.memory.mark_definedness(mplace.ptr, dest.layout.size, false)?;
                    }
                }
            }

            "write_bytes" => {
                let ty = substs.type_at(0);
                let ty_layout = self.layout_of(ty)?;
                let val_byte = self.read_scalar(args[1])?.to_u8()?;
                let ptr = self.read_scalar(args[0])?.not_undef()?;
                let count = self.read_scalar(args[2])?.to_usize(&self)?;
                if count > 0 {
                    // HashMap relies on write_bytes on a NULL ptr with count == 0 to work
                    // TODO: Should we, at least, validate the alignment? (Also see the copy intrinsic)
                    self.memory.check_align(ptr, ty_layout.align)?;
                    self.memory.write_repeat(ptr, val_byte, ty_layout.size * count)?;
                }
            }

            name => return err!(Unimplemented(format!("unimplemented intrinsic: {}", name))),
        }

        self.goto_block(target);

        // Since we pushed no stack frame, the main loop will act
        // as if the call just completed and it's returning to the
        // current frame.
        Ok(())
    }
}

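/// Evaluates a bit-level numeric intrinsic (`bswap`, `ctlz`, `ctpop`, `cttz`) on the raw
/// `bytes` of an integer of the given primitive `kind`, returning the result as a `Scalar`.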
fn numeric_intrinsic<'tcx>(
    name: &str,
    bytes: u128,
    kind: Primitive,
) -> EvalResult<'tcx, Scalar> {
    macro_rules! integer_intrinsic {
        ($method:ident) => ({
            let (result_bytes, size) = match kind {
                Primitive::Int(I8, true) => ((bytes as i8).$method() as u128, 1),
                Primitive::Int(I8, false) => ((bytes as u8).$method() as u128, 1),
                Primitive::Int(I16, true) => ((bytes as i16).$method() as u128, 2),
                Primitive::Int(I16, false) => ((bytes as u16).$method() as u128, 2),
                Primitive::Int(I32, true) => ((bytes as i32).$method() as u128, 4),
                Primitive::Int(I32, false) => ((bytes as u32).$method() as u128, 4),
                Primitive::Int(I64, true) => ((bytes as i64).$method() as u128, 8),
                Primitive::Int(I64, false) => ((bytes as u64).$method() as u128, 8),
                Primitive::Int(I128, true) => ((bytes as i128).$method() as u128, 16),
                Primitive::Int(I128, false) => (bytes.$method() as u128, 16),
                _ => bug!("invalid `{}` argument: {:?}", name, bytes),
            };
            Scalar::from_uint(result_bytes, Size::from_bytes(size))
        });
    }

    let result_val = match name {
        "bswap" => integer_intrinsic!(swap_bytes),
        "ctlz" => integer_intrinsic!(leading_zeros),
        "ctpop" => integer_intrinsic!(count_ones),
        "cttz" => integer_intrinsic!(trailing_zeros),
        _ => bug!("not a numeric intrinsic: {}", name),