use rustc::ty;
use rustc::ty::layout::{self, LayoutOf, Size};
use rustc::mir;

use rustc::mir::interpret::{EvalResult, PointerArithmetic};
use super::{
    PlaceTy, OpTy, Value, Scalar, ScalarMaybeUndef, Borrow,
    FalibleScalarExt, OperatorEvalContextExt
};
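
/// Extension trait providing `call_intrinsic`, Miri's dispatcher for compiler
/// intrinsics; implemented below for `MiriEvalContext`.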
pub trait EvalContextExt<'tcx> {
    fn call_intrinsic(
        &mut self,
        instance: ty::Instance<'tcx>,
        args: &[OpTy<'tcx, Borrow>],
        dest: PlaceTy<'tcx, Borrow>,
    ) -> EvalResult<'tcx>;
}

impl<'a, 'mir, 'tcx> EvalContextExt<'tcx> for super::MiriEvalContext<'a, 'mir, 'tcx> {
    fn call_intrinsic(
        &mut self,
        instance: ty::Instance<'tcx>,
        args: &[OpTy<'tcx, Borrow>],
        dest: PlaceTy<'tcx, Borrow>,
    ) -> EvalResult<'tcx> {
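        // Intrinsics that the shared const-eval engine in `rustc_mir` can already
        // handle are emulated there; `emulate_intrinsic` returns `true` on success.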
        if self.emulate_intrinsic(instance, args, dest)? {
            return Ok(());
        }

        let substs = instance.substs;
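
        // Dispatch on the intrinsic's name; `substs` above carries the intrinsic's
        // monomorphized type arguments (e.g. the `T` of `ptr::offset::<T>`).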
        let intrinsic_name = &self.tcx.item_name(instance.def_id()).as_str()[..];
        match intrinsic_name {
            "arith_offset" => {
                let offset = self.read_scalar(args[1])?.to_isize(&self)?;
                let ptr = self.read_scalar(args[0])?.not_undef()?;

                let pointee_ty = substs.type_at(0);
                let pointee_size = self.layout_of(pointee_ty)?.size.bytes() as i64;
                let offset = offset.overflowing_mul(pointee_size).0;
                let result_ptr = ptr.ptr_wrapping_signed_offset(offset, &self);
                self.write_scalar(result_ptr, dest)?;
            }
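
            // `assume(b)`: the program promises that `b` is true; if it is not,
            // that is undefined behavior, which Miri reports as an error.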
            "assume" => {
                let cond = self.read_scalar(args[0])?.to_bool()?;
                if !cond {
                    return err!(AssumptionNotHeld);
                }
            }
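
            // Atomic and volatile loads: Miri is single-threaded, so every ordering
            // collapses to a plain read.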
55 "atomic_load_relaxed" |
58 let ptr = self.ref_to_mplace(self.read_value(args[0])?)?;
59 let val = self.read_scalar(ptr.into())?; // make sure it fits into a scalar; otherwise it cannot be atomic
60 self.write_scalar(val, dest)?;
64 "atomic_store_relaxed" |
67 let ptr = self.ref_to_mplace(self.read_value(args[0])?)?;
68 let val = self.read_scalar(args[1])?; // make sure it fits into a scalar; otherwise it cannot be atomic
69 self.write_scalar(val, ptr.into())?;
72 "atomic_fence_acq" => {
73 // we are inherently singlethreaded and singlecored, this is a nop
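
            // `atomic_xchg*`: unconditionally swap the pointed-to value, returning the old one.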
            _ if intrinsic_name.starts_with("atomic_xchg") => {
                let ptr = self.ref_to_mplace(self.read_value(args[0])?)?;
                let new = self.read_scalar(args[1])?;
                let old = self.read_scalar(ptr.into())?;
                self.write_scalar(old, dest)?; // old value is returned
                self.write_scalar(new, ptr.into())?;
            }
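
            // `atomic_cxchg*` (compare-exchange): returns a pair of the old value
            // and a flag indicating whether the exchange happened.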
            _ if intrinsic_name.starts_with("atomic_cxchg") => {
                let ptr = self.ref_to_mplace(self.read_value(args[0])?)?;
                let expect_old = self.read_value(args[1])?; // read as value for the sake of `binary_op_val()`
                let new = self.read_scalar(args[2])?;
                let old = self.read_value(ptr.into())?; // read as value for the sake of `binary_op_val()`
                // binary_op_val will bail if either of them is not a scalar
                let (eq, _) = self.binary_op_val(mir::BinOp::Eq, old, expect_old)?;
                let res = Value::ScalarPair(old.to_scalar_or_undef(), eq.into());
                self.write_value(res, dest)?; // old value is returned
                // update ptr depending on comparison
                if eq.to_bool()? {
                    self.write_scalar(new, ptr.into())?;
                }
            }
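
            // The read-modify-write atomics all follow the same pattern: return the
            // old value, then combine it with the operand in place.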
103 "atomic_or_relaxed" |
107 "atomic_xor_acqrel" |
108 "atomic_xor_relaxed" |
112 "atomic_and_acqrel" |
113 "atomic_and_relaxed" |
117 "atomic_xadd_acqrel" |
118 "atomic_xadd_relaxed" |
122 "atomic_xsub_acqrel" |
123 "atomic_xsub_relaxed" => {
                let ptr = self.ref_to_mplace(self.read_value(args[0])?)?;
                let rhs = self.read_value(args[1])?;
                let old = self.read_value(ptr.into())?;
                self.write_value(*old, dest)?; // old value is returned
                let op = match intrinsic_name.split('_').nth(1).unwrap() {
                    "or" => mir::BinOp::BitOr,
                    "xor" => mir::BinOp::BitXor,
                    "and" => mir::BinOp::BitAnd,
                    "xadd" => mir::BinOp::Add,
                    "xsub" => mir::BinOp::Sub,
                    _ => bug!(),
                };
                // Atomics wrap around on overflow.
                self.binop_ignore_overflow(op, old, rhs, ptr.into())?;
            }
140 "breakpoint" => unimplemented!(), // halt miri
143 "copy_nonoverlapping" => {
144 let elem_ty = substs.type_at(0);
145 let elem_layout = self.layout_of(elem_ty)?;
146 let elem_size = elem_layout.size.bytes();
147 let count = self.read_scalar(args[2])?.to_usize(&self)?;
148 let elem_align = elem_layout.align;
149 let src = self.read_scalar(args[0])?.not_undef()?;
150 let dest = self.read_scalar(args[1])?.not_undef()?;
156 Size::from_bytes(count * elem_size),
157 intrinsic_name.ends_with("_nonoverlapping"),
161 "discriminant_value" => {
162 let place = self.ref_to_mplace(self.read_value(args[0])?)?;
163 let discr_val = self.read_discriminant(place.into())?.0;
164 self.write_scalar(Scalar::from_uint(discr_val, dest.layout.size), dest)?;
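
            // Unary float operations, computed with the host's float implementation.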
167 "sinf32" | "fabsf32" | "cosf32" | "sqrtf32" | "expf32" | "exp2f32" | "logf32" |
168 "log10f32" | "log2f32" | "floorf32" | "ceilf32" | "truncf32" => {
169 let f = self.read_scalar(args[0])?.to_f32()?;
170 let f = match intrinsic_name {
172 "fabsf32" => f.abs(),
174 "sqrtf32" => f.sqrt(),
176 "exp2f32" => f.exp2(),
178 "log10f32" => f.log10(),
179 "log2f32" => f.log2(),
180 "floorf32" => f.floor(),
181 "ceilf32" => f.ceil(),
182 "truncf32" => f.trunc(),
185 self.write_scalar(Scalar::from_f32(f), dest)?;
188 "sinf64" | "fabsf64" | "cosf64" | "sqrtf64" | "expf64" | "exp2f64" | "logf64" |
189 "log10f64" | "log2f64" | "floorf64" | "ceilf64" | "truncf64" => {
190 let f = self.read_scalar(args[0])?.to_f64()?;
191 let f = match intrinsic_name {
193 "fabsf64" => f.abs(),
195 "sqrtf64" => f.sqrt(),
197 "exp2f64" => f.exp2(),
199 "log10f64" => f.log10(),
200 "log2f64" => f.log2(),
201 "floorf64" => f.floor(),
202 "ceilf64" => f.ceil(),
203 "truncf64" => f.trunc(),
206 self.write_scalar(Scalar::from_f64(f), dest)?;
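
            // The "fast math" intrinsics; Miri simply performs the plain operation
            // and does not exploit the extra UB these are allowed.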
209 "fadd_fast" | "fsub_fast" | "fmul_fast" | "fdiv_fast" | "frem_fast" => {
210 let a = self.read_value(args[0])?;
211 let b = self.read_value(args[1])?;
212 let op = match intrinsic_name {
213 "fadd_fast" => mir::BinOp::Add,
214 "fsub_fast" => mir::BinOp::Sub,
215 "fmul_fast" => mir::BinOp::Mul,
216 "fdiv_fast" => mir::BinOp::Div,
217 "frem_fast" => mir::BinOp::Rem,
220 self.binop_ignore_overflow(op, a, b, dest)?;

            "exact_div" => {
                // Performs an exact division, resulting in undefined behavior when
                // `x % y != 0`, `y == 0`, or `x == T::min_value() && y == -1`.
                let a = self.read_value(args[0])?;
                let b = self.read_value(args[1])?;
                // Check x % y != 0.
                if !self.binary_op_val(mir::BinOp::Rem, a, b)?.0.is_null() {
                    return err!(ValidationFailure(format!("exact_div: {:?} cannot be divided by {:?}", a, b)));
                }
                self.binop_ignore_overflow(mir::BinOp::Div, a, b, dest)?;
            }
235 "likely" | "unlikely" | "forget" => {}
            "init" => {
                // Check fast path: we don't want to force an allocation in case the destination is a simple value,
                // but we also do not want to create a new allocation with 0s and then copy that over.
                // FIXME: We do not properly validate in case of ZSTs and when doing it in memory!
                // However, this only affects direct calls of the intrinsic; calls to the stable
                // functions wrapping them do get their validation.
                if !dest.layout.is_zst() { // nothing to do for ZST
                    match dest.layout.abi {
                        layout::Abi::Scalar(ref s) => {
                            let x = Scalar::from_int(0, s.value.size(&self));
                            self.write_value(Value::Scalar(x.into()), dest)?;
                        }
                        layout::Abi::ScalarPair(ref s1, ref s2) => {
                            let x = Scalar::from_int(0, s1.value.size(&self));
                            let y = Scalar::from_int(0, s2.value.size(&self));
                            self.write_value(Value::ScalarPair(x.into(), y.into()), dest)?;
                        }
                        _ => {
                            // Do it in memory.
                            let mplace = self.force_allocation(dest)?;
                            assert!(mplace.meta.is_none());
                            self.memory.write_repeat(mplace.ptr, 0, dest.layout.size)?;
                        }
                    }
                }
            }
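
            // `pref_align_of`: the preferred (not the ABI-required) alignment of `T`.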
            "pref_align_of" => {
                let ty = substs.type_at(0);
                let layout = self.layout_of(ty)?;
                let align = layout.align.pref();
                let ptr_size = self.pointer_size();
                let align_val = Scalar::from_uint(align as u128, ptr_size);
                self.write_scalar(align_val, dest)?;
            }

            "move_val_init" => {
                let ptr = self.ref_to_mplace(self.read_value(args[0])?)?;
                self.copy_op(args[1], ptr.into())?;
            }
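
            // `offset`: unlike `arith_offset` above, the result must stay within the
            // bounds of the original allocation; `pointer_offset_inbounds` checks that.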
            "offset" => {
                let offset = self.read_scalar(args[1])?.to_isize(&self)?;
                let ptr = self.read_scalar(args[0])?.not_undef()?;
                let result_ptr = self.pointer_offset_inbounds(ptr, substs.type_at(0), offset)?;
                self.write_scalar(result_ptr, dest)?;
            }

            "powf32" => {
                let f = self.read_scalar(args[0])?.to_f32()?;
                let f2 = self.read_scalar(args[1])?.to_f32()?;
                self.write_scalar(
                    Scalar::from_f32(f.powf(f2)),
                    dest,
                )?;
            }

            "powf64" => {
                let f = self.read_scalar(args[0])?.to_f64()?;
                let f2 = self.read_scalar(args[1])?.to_f64()?;
                self.write_scalar(
                    Scalar::from_f64(f.powf(f2)),
                    dest,
                )?;
            }
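
            // `fma*`: note that `a * b + c` rounds the intermediate product, so this
            // is not a true fused multiply-add.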
            "fmaf32" => {
                let a = self.read_scalar(args[0])?.to_f32()?;
                let b = self.read_scalar(args[1])?.to_f32()?;
                let c = self.read_scalar(args[2])?.to_f32()?;
                self.write_scalar(
                    Scalar::from_f32(a * b + c),
                    dest,
                )?;
            }

            "fmaf64" => {
                let a = self.read_scalar(args[0])?.to_f64()?;
                let b = self.read_scalar(args[1])?.to_f64()?;
                let c = self.read_scalar(args[2])?.to_f64()?;
                self.write_scalar(
                    Scalar::from_f64(a * b + c),
                    dest,
                )?;
            }

            "powif32" => {
                let f = self.read_scalar(args[0])?.to_f32()?;
                let i = self.read_scalar(args[1])?.to_i32()?;
                self.write_scalar(
                    Scalar::from_f32(f.powi(i)),
                    dest,
                )?;
            }

            "powif64" => {
                let f = self.read_scalar(args[0])?.to_f64()?;
                let i = self.read_scalar(args[1])?.to_i32()?;
                self.write_scalar(
                    Scalar::from_f64(f.powi(i)),
                    dest,
                )?;
            }
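
            // `size_of_val`/`align_of_val`: need the place's metadata (slice length,
            // vtable) to compute the dynamic size and alignment.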
            "size_of_val" => {
                let mplace = self.ref_to_mplace(self.read_value(args[0])?)?;
                let (size, _) = self.size_and_align_of_mplace(mplace)?
                    .expect("size_of_val called on extern type");
                let ptr_size = self.pointer_size();
                self.write_scalar(
                    Scalar::from_uint(size.bytes() as u128, ptr_size),
                    dest,
                )?;
            }

            "min_align_of_val" |
            "align_of_val" => {
                let mplace = self.ref_to_mplace(self.read_value(args[0])?)?;
                let (_, align) = self.size_and_align_of_mplace(mplace)?
                    .expect("align_of_val called on extern type");
                let ptr_size = self.pointer_size();
                self.write_scalar(
                    Scalar::from_uint(align.abi(), ptr_size),
                    dest,
                )?;
            }

            "type_name" => {
                let ty = substs.type_at(0);
                let ty_name = ty.to_string();
                let value = self.str_to_value(&ty_name)?;
                self.write_value(value, dest)?;
            }
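
            // `unchecked_div`/`unchecked_rem`: division where a zero divisor is UB
            // rather than a panic, so Miri reports it as an error.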
            "unchecked_div" => {
                let l = self.read_value(args[0])?;
                let r = self.read_value(args[1])?;
                let rval = r.to_scalar()?.to_bytes()?;
                if rval == 0 {
                    return err!(Intrinsic(format!("Division by 0 in unchecked_div")));
                }
                self.binop_ignore_overflow(
                    mir::BinOp::Div,
                    l,
                    r,
                    dest,
                )?;
            }

            "unchecked_rem" => {
                let l = self.read_value(args[0])?;
                let r = self.read_value(args[1])?;
                let rval = r.to_scalar()?.to_bytes()?;
                if rval == 0 {
                    return err!(Intrinsic(format!("Division by 0 in unchecked_rem")));
                }
                self.binop_ignore_overflow(
                    mir::BinOp::Rem,
                    l,
                    r,
                    dest,
                )?;
            }
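
            // `uninit`: mirror of `init` above, but marking the destination as
            // undefined instead of zeroing it.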
            "uninit" => {
                // Check fast path: we don't want to force an allocation in case the destination is a simple value,
                // but we also do not want to create a new allocation, mark it undefined, and then copy that over.
                // FIXME: We do not properly validate in case of ZSTs and when doing it in memory!
                // However, this only affects direct calls of the intrinsic; calls to the stable
                // functions wrapping them do get their validation.
                if !dest.layout.is_zst() { // nothing to do for ZST
                    match dest.layout.abi {
                        layout::Abi::Scalar(..) => {
                            let x = ScalarMaybeUndef::Undef;
                            self.write_value(Value::Scalar(x), dest)?;
                        }
                        layout::Abi::ScalarPair(..) => {
                            let x = ScalarMaybeUndef::Undef;
                            self.write_value(Value::ScalarPair(x, x), dest)?;
                        }
                        _ => {
                            // Do it in memory.
                            let mplace = self.force_allocation(dest)?;
                            assert!(mplace.meta.is_none());
                            self.memory.mark_definedness(mplace.ptr.to_ptr()?, dest.layout.size, false)?;
                        }
                    }
                }
            }

            "write_bytes" => {
                let ty = substs.type_at(0);
                let ty_layout = self.layout_of(ty)?;
                let val_byte = self.read_scalar(args[1])?.to_u8()?;
                let ptr = self.read_scalar(args[0])?.not_undef()?;
                let count = self.read_scalar(args[2])?.to_usize(&self)?;
                self.memory.check_align(ptr, ty_layout.align)?;
                self.memory.write_repeat(ptr, val_byte, ty_layout.size * count)?;
            }

            name => return err!(Unimplemented(format!("unimplemented intrinsic: {}", name))),
        }

        Ok(())
    }
}