2 use rustc::traits::Reveal;
3 use rustc::ty::layout::Layout;
4 use rustc::ty::{self, Ty};
10 PrimVal, PrimValKind, Value, Pointer,
15 impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
/// Emulates a single Rust compiler intrinsic call inside the interpreter:
/// dispatches on the intrinsic's item name, writes the result into the
/// destination lvalue, and finally jumps to `target` as if the call returned.
///
/// NOTE(review): this view of the file is missing interspersed lines (the
/// embedded original line numbers jump), so several match-arm patterns,
/// closing braces, and the `dest`/`dest_ty` parameter declarations are not
/// visible here. Comments below are limited to what the visible lines show;
/// anything else is marked as an assumption to confirm.
16 pub(super) fn call_intrinsic(
18 instance: ty::Instance<'tcx>,
19 args: &[mir::Operand<'tcx>],
22 dest_layout: &'tcx Layout,
23 target: mir::BasicBlock,
24 ) -> EvalResult<'tcx> {
// Eagerly evaluate every operand up front; collecting into
// EvalResult<Vec<_>> short-circuits on the first evaluation error.
25 let arg_vals: EvalResult<Vec<Value>> = args.iter()
26 .map(|arg| self.eval_operand(arg))
28 let arg_vals = arg_vals?;
// Cache frequently needed primitive Ty handles from the type interner.
29 let i32 = self.tcx.types.i32;
30 let isize = self.tcx.types.isize;
31 let usize = self.tcx.types.usize;
32 let f32 = self.tcx.types.f32;
33 let f64 = self.tcx.types.f64;
34 let substs = instance.substs;
// Intrinsics are identified purely by their item name string.
36 let intrinsic_name = &self.tcx.item_name(instance.def_id()).as_str()[..];
37 match intrinsic_name {
// Checked arithmetic intrinsics: delegate to a helper that writes the
// (result, overflowed-flag) pair.
38 "add_with_overflow" =>
39 self.intrinsic_with_overflow(mir::BinOp::Add, &args[0], &args[1], dest, dest_ty)?,
41 "sub_with_overflow" =>
42 self.intrinsic_with_overflow(mir::BinOp::Sub, &args[0], &args[1], dest, dest_ty)?,
44 "mul_with_overflow" =>
45 self.intrinsic_with_overflow(mir::BinOp::Mul, &args[0], &args[1], dest, dest_ty)?,
// Wrapping pointer offset (presumably the `arith_offset` arm — the
// pattern line is elided in this view; confirm).
49 let offset = self.value_to_primval(arg_vals[1], isize)?.to_i128()? as i64;
50 let ptr = arg_vals[0].into_ptr(&self.memory)?;
51 let result_ptr = self.wrapping_pointer_offset(ptr, substs.type_at(0), offset)?;
52 self.write_ptr(dest, result_ptr, dest_ty)?;
// `assume(cond)`: a false assumption is a program bug, so abort
// evaluation with a dedicated error.
56 let bool = self.tcx.types.bool;
57 let cond = self.value_to_primval(arg_vals[0], bool)?.to_bool()?;
58 if !cond { return Err(EvalError::AssumptionNotHeld); }
// Atomic loads: the interpreter is single-threaded (see the fence nop
// below), so all orderings collapse to a plain read of the pointee.
62 "atomic_load_relaxed" |
65 let ty = substs.type_at(0);
66 let ptr = arg_vals[0].into_ptr(&self.memory)?;
67 self.write_value(Value::by_ref(ptr), dest, ty)?;
// Atomic stores: likewise reduced to a plain write through the pointer.
71 "atomic_store_relaxed" |
74 let ty = substs.type_at(0);
75 let dest = arg_vals[0].into_ptr(&self.memory)?;
76 self.write_value_to_ptr(arg_vals[1], dest, ty)?;
79 "atomic_fence_acq" => {
80 // we are inherently singlethreaded and singlecored, this is a nop
// atomic_xchg*: return the old value and store the new one.
83 _ if intrinsic_name.starts_with("atomic_xchg") => {
84 let ty = substs.type_at(0);
85 let ptr = arg_vals[0].into_ptr(&self.memory)?;
86 let change = self.value_to_primval(arg_vals[1], ty)?;
87 let old = self.read_value(ptr, ty)?;
89 Value::ByVal(val) => val,
90 Value::ByRef { .. } => bug!("just read the value, can't be byref"),
91 Value::ByValPair(..) => bug!("atomic_xchg doesn't work with nonprimitives"),
93 self.write_primval(dest, old, ty)?;
94 self.write_primval(Lvalue::from_primval_ptr(ptr), change, ty)?;
// atomic_cxchg*: compare-and-swap. Writes the (old value, success-flag)
// pair to the destination. NOTE(review): the visible code stores
// `change` unconditionally, i.e. regardless of whether the comparison
// succeeded — confirm against the elided lines; a real cxchg only
// stores on success.
97 _ if intrinsic_name.starts_with("atomic_cxchg") => {
98 let ty = substs.type_at(0);
99 let ptr = arg_vals[0].into_ptr(&self.memory)?;
100 let expect_old = self.value_to_primval(arg_vals[1], ty)?;
101 let change = self.value_to_primval(arg_vals[2], ty)?;
102 let old = self.read_value(ptr, ty)?;
103 let old = match old {
104 Value::ByVal(val) => val,
105 Value::ByRef { .. } => bug!("just read the value, can't be byref"),
106 Value::ByValPair(..) => bug!("atomic_cxchg doesn't work with nonprimitives"),
108 let (val, _) = self.binary_op(mir::BinOp::Eq, old, ty, expect_old, ty)?;
109 let dest = self.force_allocation(dest)?.to_ptr()?;
110 self.write_pair_to_ptr(old, val, dest, dest_ty)?;
111 self.write_primval(Lvalue::from_primval_ptr(ptr), change, ty)?;
// Atomic read-modify-write family: return the old value, then apply the
// binary op chosen from the second `_`-separated name segment.
114 "atomic_or" | "atomic_or_acq" | "atomic_or_rel" | "atomic_or_acqrel" | "atomic_or_relaxed" |
115 "atomic_xor" | "atomic_xor_acq" | "atomic_xor_rel" | "atomic_xor_acqrel" | "atomic_xor_relaxed" |
116 "atomic_and" | "atomic_and_acq" | "atomic_and_rel" | "atomic_and_acqrel" | "atomic_and_relaxed" |
117 "atomic_xadd" | "atomic_xadd_acq" | "atomic_xadd_rel" | "atomic_xadd_acqrel" | "atomic_xadd_relaxed" |
118 "atomic_xsub" | "atomic_xsub_acq" | "atomic_xsub_rel" | "atomic_xsub_acqrel" | "atomic_xsub_relaxed" => {
119 let ty = substs.type_at(0);
120 let ptr = arg_vals[0].into_ptr(&self.memory)?;
121 let change = self.value_to_primval(arg_vals[1], ty)?;
122 let old = self.read_value(ptr, ty)?;
123 let old = match old {
124 Value::ByVal(val) => val,
125 Value::ByRef { .. } => bug!("just read the value, can't be byref"),
126 Value::ByValPair(..) => bug!("atomic_xadd_relaxed doesn't work with nonprimitives"),
128 self.write_primval(dest, old, ty)?;
// e.g. "atomic_xadd_acq".split('_').nth(1) == "xadd".
129 let op = match intrinsic_name.split('_').nth(1).unwrap() {
130 "or" => mir::BinOp::BitOr,
131 "xor" => mir::BinOp::BitXor,
132 "and" => mir::BinOp::BitAnd,
133 "xadd" => mir::BinOp::Add,
134 "xsub" => mir::BinOp::Sub,
137 // FIXME: what do atomics do on overflow?
138 let (val, _) = self.binary_op(op, old, ty, change, ty)?;
139 self.write_primval(Lvalue::from_primval_ptr(ptr), val, ty)?;
142 "breakpoint" => unimplemented!(), // halt miri
// `copy`/`copy_nonoverlapping`: byte-wise memcpy/memmove of
// `count * size_of::<T>()` bytes; the nonoverlapping variant asks the
// memory subsystem to check for overlap.
145 "copy_nonoverlapping" => {
146 let elem_ty = substs.type_at(0);
147 let elem_size = self.type_size(elem_ty)?.expect("cannot copy unsized value");
148 let count = self.value_to_primval(arg_vals[2], usize)?.to_u64()?;
// NOTE(review): `count * elem_size` is an unchecked u64 multiply here
// and again in the copy call below; a huge `count` could overflow
// (panic in debug, silently wrap in release). Consider `checked_mul`.
149 if count * elem_size != 0 {
150 // TODO: We do not even validate alignment for the 0-bytes case. libstd relies on this in vec::IntoIter::next.
151 // Also see the write_bytes intrinsic.
152 let elem_align = self.type_align(elem_ty)?;
153 let src = arg_vals[0].into_ptr(&self.memory)?;
154 let dest = arg_vals[1].into_ptr(&self.memory)?;
155 self.memory.copy(src, dest, count * elem_size, elem_align, intrinsic_name.ends_with("_nonoverlapping"))?;
// Bit-counting/byteswap intrinsics (ctpop/ctlz/cttz/bswap — the arm
// pattern is elided in this view; confirm). The `_nonzero` variants
// have UB on zero input, reported here as an interpreter error.
165 let ty = substs.type_at(0);
166 let num = self.value_to_primval(arg_vals[0], ty)?.to_bytes()?;
167 let kind = self.ty_to_primval_kind(ty)?;
168 let num = if intrinsic_name.ends_with("_nonzero") {
170 return Err(EvalError::Intrinsic(format!("{} called on 0", intrinsic_name)))
// NOTE(review): `trim_right_matches` is deprecated in later Rust in
// favor of `trim_end_matches` — fine for the toolchain this targets.
172 numeric_intrinsic(intrinsic_name.trim_right_matches("_nonzero"), num, kind)?
174 numeric_intrinsic(intrinsic_name, num, kind)?
176 self.write_primval(dest, num, ty)?;
// Reads the enum discriminant out of memory.
179 "discriminant_value" => {
180 let ty = substs.type_at(0);
181 let adt_ptr = arg_vals[0].into_ptr(&self.memory)?.to_ptr()?;
182 let discr_val = self.read_discriminant_value(adt_ptr, ty)?;
183 self.write_primval(dest, PrimVal::Bytes(discr_val), dest_ty)?;
// Unary f32 math intrinsics, implemented via the host's f32 methods.
186 "sinf32" | "fabsf32" | "cosf32" |
187 "sqrtf32" | "expf32" | "exp2f32" |
188 "logf32" | "log10f32" | "log2f32" |
189 "floorf32" | "ceilf32" | "truncf32" => {
190 let f = self.value_to_primval(arg_vals[0], f32)?.to_f32()?;
191 let f = match intrinsic_name {
193 "fabsf32" => f.abs(),
195 "sqrtf32" => f.sqrt(),
197 "exp2f32" => f.exp2(),
199 "log10f32" => f.log10(),
200 "log2f32" => f.log2(),
201 "floorf32" => f.floor(),
202 "ceilf32" => f.ceil(),
203 "truncf32" => f.trunc(),
206 self.write_primval(dest, PrimVal::from_f32(f), dest_ty)?;
// Unary f64 math intrinsics — mirror of the f32 arm above.
209 "sinf64" | "fabsf64" | "cosf64" |
210 "sqrtf64" | "expf64" | "exp2f64" |
211 "logf64" | "log10f64" | "log2f64" |
212 "floorf64" | "ceilf64" | "truncf64" => {
213 let f = self.value_to_primval(arg_vals[0], f64)?.to_f64()?;
214 let f = match intrinsic_name {
216 "fabsf64" => f.abs(),
218 "sqrtf64" => f.sqrt(),
220 "exp2f64" => f.exp2(),
222 "log10f64" => f.log10(),
223 "log2f64" => f.log2(),
224 "floorf64" => f.floor(),
225 "ceilf64" => f.ceil(),
226 "truncf64" => f.trunc(),
229 self.write_primval(dest, PrimVal::from_f64(f), dest_ty)?;
// Fast-math float ops: evaluated here as ordinary (precise) float
// binary ops; the fast-math UB preconditions are not checked in the
// visible code.
232 "fadd_fast" | "fsub_fast" | "fmul_fast" | "fdiv_fast" | "frem_fast" => {
233 let ty = substs.type_at(0);
234 let a = self.value_to_primval(arg_vals[0], ty)?;
235 let b = self.value_to_primval(arg_vals[1], ty)?;
236 let op = match intrinsic_name {
237 "fadd_fast" => mir::BinOp::Add,
238 "fsub_fast" => mir::BinOp::Sub,
239 "fmul_fast" => mir::BinOp::Mul,
240 "fdiv_fast" => mir::BinOp::Div,
241 "frem_fast" => mir::BinOp::Rem,
244 let result = self.binary_op(op, a, ty, b, ty)?;
245 self.write_primval(dest, result.0, dest_ty)?;
// `init`-style zero-fill of the destination (arm pattern elided in this
// view; presumably "init" — confirm). The closure normalizes whatever
// representation the destination currently has to an all-zero value.
253 let size = self.type_size(dest_ty)?.expect("cannot zero unsized value");
254 let init = |this: &mut Self, val: Value| {
255 let zero_val = match val {
256 Value::ByRef { ptr, aligned } => {
257 // These writes have no alignment restriction anyway.
258 this.memory.write_repeat(ptr, 0, size)?;
259 Value::ByRef { ptr, aligned }
261 // TODO(solson): Revisit this, it's fishy to check for Undef here.
262 Value::ByVal(PrimVal::Undef) => match this.ty_to_primval_kind(dest_ty) {
263 Ok(_) => Value::ByVal(PrimVal::Bytes(0)),
// Non-primitive type: allocate backing memory and zero it.
265 let ptr = this.alloc_ptr_with_substs(dest_ty, substs)?;
266 let ptr = Pointer::from(PrimVal::Ptr(ptr));
267 this.memory.write_repeat(ptr, 0, size)?;
271 Value::ByVal(_) => Value::ByVal(PrimVal::Bytes(0)),
272 Value::ByValPair(..) =>
273 Value::ByValPair(PrimVal::Bytes(0), PrimVal::Bytes(0)),
// Apply the zeroing according to where the destination lvalue lives.
278 Lvalue::Local { frame, local } => self.modify_local(frame, local, init)?,
279 Lvalue::Ptr { ptr, extra: LvalueExtra::None, aligned: true } => self.memory.write_repeat(ptr, 0, size)?,
280 Lvalue::Ptr { .. } => bug!("init intrinsic tried to write to fat or unaligned ptr target"),
281 Lvalue::Global(cid) => self.modify_global(cid, init)?,
// ABI-required alignment of the type parameter (presumably
// `min_align_of` — arm pattern elided; confirm).
286 let elem_ty = substs.type_at(0);
287 let elem_align = self.type_align(elem_ty)?;
288 let align_val = PrimVal::from_u128(elem_align as u128);
289 self.write_primval(dest, align_val, dest_ty)?;
// Preferred alignment, taken from the layout's `.pref()` field
// (presumably `pref_align_of` — confirm).
293 let ty = substs.type_at(0);
294 let layout = self.type_layout(ty)?;
295 let align = layout.align(&self.tcx.data_layout).pref();
296 let align_val = PrimVal::from_u128(align as u128);
297 self.write_primval(dest, align_val, dest_ty)?;
// Write a value through a raw pointer (presumably `move_val_init` —
// confirm against the elided arm pattern).
301 let ty = substs.type_at(0);
302 let ptr = arg_vals[0].into_ptr(&self.memory)?;
303 self.write_value_to_ptr(arg_vals[1], ptr, ty)?;
// `needs_drop`: query the type system with everything revealed.
307 let ty = substs.type_at(0);
308 let env = ty::ParamEnv::empty(Reveal::All);
309 let needs_drop = ty.needs_drop(self.tcx, env);
310 self.write_primval(dest, PrimVal::from_bool(needs_drop), dest_ty)?;
// Non-wrapping `offset`: `pointer_offset` is expected to enforce the
// in-bounds rules (unlike the wrapping variant above).
314 let offset = self.value_to_primval(arg_vals[1], isize)?.to_i128()? as i64;
315 let ptr = arg_vals[0].into_ptr(&self.memory)?;
316 let result_ptr = self.pointer_offset(ptr, substs.type_at(0), offset)?;
317 self.write_ptr(dest, result_ptr, dest_ty)?;
// Wrapping arithmetic: result only, overflow flag discarded.
320 "overflowing_sub" => {
321 self.intrinsic_overflowing(mir::BinOp::Sub, &args[0], &args[1], dest, dest_ty)?;
324 "overflowing_mul" => {
325 self.intrinsic_overflowing(mir::BinOp::Mul, &args[0], &args[1], dest, dest_ty)?;
328 "overflowing_add" => {
329 self.intrinsic_overflowing(mir::BinOp::Add, &args[0], &args[1], dest, dest_ty)?;
// powf for f32/f64 via host floats.
333 let f = self.value_to_primval(arg_vals[0], f32)?.to_f32()?;
334 let f2 = self.value_to_primval(arg_vals[1], f32)?.to_f32()?;
335 self.write_primval(dest, PrimVal::from_f32(f.powf(f2)), dest_ty)?;
339 let f = self.value_to_primval(arg_vals[0], f64)?.to_f64()?;
340 let f2 = self.value_to_primval(arg_vals[1], f64)?.to_f64()?;
341 self.write_primval(dest, PrimVal::from_f64(f.powf(f2)), dest_ty)?;
// Fused multiply-add, f32 then f64. NOTE(review): computed as
// `a * b + c` with an intermediate rounding step, not a true fused
// (single-rounding) fma — results can differ in the last ulp.
345 let a = self.value_to_primval(arg_vals[0], f32)?.to_f32()?;
346 let b = self.value_to_primval(arg_vals[1], f32)?.to_f32()?;
347 let c = self.value_to_primval(arg_vals[2], f32)?.to_f32()?;
348 self.write_primval(dest, PrimVal::from_f32(a * b + c), dest_ty)?;
352 let a = self.value_to_primval(arg_vals[0], f64)?.to_f64()?;
353 let b = self.value_to_primval(arg_vals[1], f64)?.to_f64()?;
354 let c = self.value_to_primval(arg_vals[2], f64)?.to_f64()?;
355 self.write_primval(dest, PrimVal::from_f64(a * b + c), dest_ty)?;
// powi: exponent arrives as i32 (truncated from the 128-bit primval).
359 let f = self.value_to_primval(arg_vals[0], f32)?.to_f32()?;
360 let i = self.value_to_primval(arg_vals[1], i32)?.to_i128()?;
361 self.write_primval(dest, PrimVal::from_f32(f.powi(i as i32)), dest_ty)?;
365 let f = self.value_to_primval(arg_vals[0], f64)?.to_f64()?;
366 let i = self.value_to_primval(arg_vals[1], i32)?.to_i128()?;
367 self.write_primval(dest, PrimVal::from_f64(f.powi(i as i32)), dest_ty)?;
// size_of: statically known size of a sized type.
371 let ty = substs.type_at(0);
372 let size = self.type_size(ty)?.expect("size_of intrinsic called on unsized value") as u128;
373 self.write_primval(dest, PrimVal::from_u128(size), dest_ty)?;
// size_of_val / align_of_val: dynamic size/alignment, needed for DSTs
// (slices, trait objects) where the metadata lives in the value.
377 let ty = substs.type_at(0);
378 let (size, _) = self.size_and_align_of_dst(ty, arg_vals[0])?;
379 self.write_primval(dest, PrimVal::from_u128(size as u128), dest_ty)?;
384 let ty = substs.type_at(0);
385 let (_, align) = self.size_and_align_of_dst(ty, arg_vals[0])?;
386 self.write_primval(dest, PrimVal::from_u128(align as u128), dest_ty)?;
// type_name: render the type to a string and hand back a &str value.
390 let ty = substs.type_at(0);
391 let ty_name = ty.to_string();
392 let s = self.str_to_value(&ty_name)?;
393 self.write_value(s, dest, dest_ty)?;
// type_id: the compiler's stable hash of the type.
396 let ty = substs.type_at(0);
397 let n = self.tcx.type_id_hash(ty);
398 self.write_primval(dest, PrimVal::Bytes(n as u128), dest_ty)?;
// transmute: force the destination into memory and write the source
// bytes through it, explicitly ignoring alignment (layouts may differ).
402 let src_ty = substs.type_at(0);
403 let ptr = self.force_allocation(dest)?.to_ptr()?;
404 self.write_maybe_aligned_mut(/*aligned*/false, |ectx| {
405 ectx.write_value_to_ptr(arg_vals[0], ptr.into(), src_ty)
// unchecked_shl / unchecked_shr: these have UB when the shift amount
// reaches the bit width, so the interpreter turns that into an error.
410 let bits = self.type_size(dest_ty)?.expect("intrinsic can't be called on unsized type") as u128 * 8;
411 let rhs = self.value_to_primval(arg_vals[1], substs.type_at(0))?.to_bytes()?;
413 return Err(EvalError::Intrinsic(format!("Overflowing shift by {} in unchecked_shl", rhs)));
415 self.intrinsic_overflowing(mir::BinOp::Shl, &args[0], &args[1], dest, dest_ty)?;
419 let bits = self.type_size(dest_ty)?.expect("intrinsic can't be called on unsized type") as u128 * 8;
420 let rhs = self.value_to_primval(arg_vals[1], substs.type_at(0))?.to_bytes()?;
422 return Err(EvalError::Intrinsic(format!("Overflowing shift by {} in unchecked_shr", rhs)));
424 self.intrinsic_overflowing(mir::BinOp::Shr, &args[0], &args[1], dest, dest_ty)?;
// unchecked_div / unchecked_rem: division by zero is UB → error out.
428 let rhs = self.value_to_primval(arg_vals[1], substs.type_at(0))?.to_bytes()?;
430 return Err(EvalError::Intrinsic(format!("Division by 0 in unchecked_div")));
432 self.intrinsic_overflowing(mir::BinOp::Div, &args[0], &args[1], dest, dest_ty)?;
436 let rhs = self.value_to_primval(arg_vals[1], substs.type_at(0))?.to_bytes()?;
438 return Err(EvalError::Intrinsic(format!("Division by 0 in unchecked_rem")));
440 self.intrinsic_overflowing(mir::BinOp::Rem, &args[0], &args[1], dest, dest_ty)?;
// `uninit`-style intrinsic: mark the destination's bytes undefined
// (arm pattern elided in this view — confirm).
444 let size = dest_layout.size(&self.tcx.data_layout).bytes();
445 let uninit = |this: &mut Self, val: Value| {
447 Value::ByRef { ptr, aligned } => {
448 this.memory.mark_definedness(ptr, size, false)?;
449 Ok(Value::ByRef { ptr, aligned })
451 _ => Ok(Value::ByVal(PrimVal::Undef)),
455 Lvalue::Local { frame, local } => self.modify_local(frame, local, uninit)?,
456 Lvalue::Ptr { ptr, extra: LvalueExtra::None, aligned: true } =>
457 self.memory.mark_definedness(ptr, size, false)?,
458 Lvalue::Ptr { .. } => bug!("uninit intrinsic tried to write to fat or unaligned ptr target"),
459 Lvalue::Global(cid) => self.modify_global(cid, uninit)?,
// write_bytes: fill `count * size_of::<T>()` bytes with `val_byte`.
// NOTE(review): `size * count` is an unchecked multiply — same overflow
// concern as the copy intrinsic above.
464 let u8 = self.tcx.types.u8;
465 let ty = substs.type_at(0);
466 let ty_align = self.type_align(ty)?;
467 let val_byte = self.value_to_primval(arg_vals[1], u8)?.to_u128()? as u8;
468 let size = self.type_size(ty)?.expect("write_bytes() type must be sized");
469 let ptr = arg_vals[0].into_ptr(&self.memory)?;
470 let count = self.value_to_primval(arg_vals[2], usize)?.to_u64()?;
472 // HashMap relies on write_bytes on a NULL ptr with count == 0 to work
473 // TODO: Should we, at least, validate the alignment? (Also see the copy intrinsic)
474 self.memory.check_align(ptr, ty_align)?;
475 self.memory.write_repeat(ptr, val_byte, size * count)?;
// Anything not handled above is reported, not silently ignored.
479 name => return Err(EvalError::Unimplemented(format!("unimplemented intrinsic: {}", name))),
// No stack frame was pushed for the intrinsic, so jumping to `target`
// makes the main loop behave as if the call completed normally.
482 self.goto_block(target);
484 // Since we pushed no stack frame, the main loop will act
485 // as if the call just completed and it's returning to the
/// Evaluates the bit-manipulation numeric intrinsics (`bswap`, `ctlz`,
/// `ctpop`, `cttz`) on a raw 128-bit payload, reinterpreting it at the
/// integer width given by `kind` so each width uses its own std method.
///
/// NOTE(review): the parameter-list lines are elided in this view —
/// presumably `name: &str, bytes: u128, kind: PrimValKind` based on the
/// uses below; confirm. The function body also runs past the visible
/// lines, so the tail (returning `result_val`) is assumed.
491 fn numeric_intrinsic<'tcx>(
495 ) -> EvalResult<'tcx, PrimVal> {
// Expands to a match over every integer PrimValKind, casting the raw
// bytes to the concrete width, applying `$method`, and widening the
// result back to u128.
496 macro_rules! integer_intrinsic {
497 ($method:ident) => ({
498 use interpret::PrimValKind::*;
499 let result_bytes = match kind {
// NOTE(review): for the signed arms, the final `as u128` sign-extends
// a negative result (e.g. a bswap'd i8), leaving the high bits of
// `Bytes` set — confirm that consumers truncate to the type's width.
500 I8 => (bytes as i8).$method() as u128,
501 U8 => (bytes as u8).$method() as u128,
502 I16 => (bytes as i16).$method() as u128,
503 U16 => (bytes as u16).$method() as u128,
504 I32 => (bytes as i32).$method() as u128,
505 U32 => (bytes as u32).$method() as u128,
506 I64 => (bytes as i64).$method() as u128,
507 U64 => (bytes as u64).$method() as u128,
508 I128 => (bytes as i128).$method() as u128,
509 U128 => bytes.$method() as u128,
// Non-integer kinds (floats, pointers, …) are a caller bug.
510 _ => bug!("invalid `{}` argument: {:?}", name, bytes),
513 PrimVal::Bytes(result_bytes)
// Map the intrinsic name to the corresponding std integer method.
517 let result_val = match name {
518 "bswap" => integer_intrinsic!(swap_bytes),
519 "ctlz" => integer_intrinsic!(leading_zeros),
520 "ctpop" => integer_intrinsic!(count_ones),
521 "cttz" => integer_intrinsic!(trailing_zeros),
522 _ => bug!("not a numeric intrinsic: {}", name),