use rustc::mir;
use rustc::traits::Reveal;
use rustc::ty::layout::{Layout, Size, Align};
use rustc::ty::subst::Substs;
use rustc::ty::{self, Ty};

// NOTE: the exact import paths depend on the surrounding crate layout.
use super::{
    EvalContext, EvalError, EvalResult, Lvalue, LvalueExtra, Machine,
    PrimVal, PrimValKind, Value, Pointer,
};

impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
    pub(super) fn call_intrinsic(
        &mut self,
        instance: ty::Instance<'tcx>,
        args: &[mir::Operand<'tcx>],
        dest: Lvalue<'tcx>,
        dest_ty: Ty<'tcx>,
        dest_layout: &'tcx Layout,
        target: mir::BasicBlock,
    ) -> EvalResult<'tcx> {
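        // Evaluate all arguments up front; the arms below consume the
        // resulting `Value`s (the *_with_overflow, overflowing_* and
        // unchecked_* intrinsics re-use the raw MIR operands as well).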
        let arg_vals: EvalResult<Vec<Value>> = args.iter()
            .map(|arg| self.eval_operand(arg))
            .collect();
        let arg_vals = arg_vals?;
        let i32 = self.tcx.types.i32;
        let isize = self.tcx.types.isize;
        let usize = self.tcx.types.usize;
        let f32 = self.tcx.types.f32;
        let f64 = self.tcx.types.f64;
        let substs = instance.substs;

        let intrinsic_name = &self.tcx.item_name(instance.def_id()).as_str()[..];
        match intrinsic_name {
39 "add_with_overflow" =>
40 self.intrinsic_with_overflow(mir::BinOp::Add, &args[0], &args[1], dest, dest_ty)?,
42 "sub_with_overflow" =>
43 self.intrinsic_with_overflow(mir::BinOp::Sub, &args[0], &args[1], dest, dest_ty)?,
45 "mul_with_overflow" =>
46 self.intrinsic_with_overflow(mir::BinOp::Mul, &args[0], &args[1], dest, dest_ty)?,

            "arith_offset" => {
                let offset = self.value_to_primval(arg_vals[1], isize)?.to_i128()? as i64;
                let ptr = arg_vals[0].into_ptr(&self.memory)?;
                let result_ptr = self.wrapping_pointer_offset(ptr, substs.type_at(0), offset)?;
                self.write_ptr(dest, result_ptr, dest_ty)?;
            }

            "assume" => {
                let bool = self.tcx.types.bool;
                let cond = self.value_to_primval(arg_vals[0], bool)?.to_bool()?;
                if !cond { return Err(EvalError::AssumptionNotHeld); }
            }
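
            // Miri is single-threaded, so every atomic ordering collapses to
            // a plain load or store.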
63 "atomic_load_relaxed" |
66 let ty = substs.type_at(0);
67 let ptr = arg_vals[0].into_ptr(&self.memory)?;
68 self.write_value(Value::by_ref(ptr), dest, ty)?;
72 "atomic_store_relaxed" |
75 let ty = substs.type_at(0);
76 let dest = arg_vals[0].into_ptr(&self.memory)?;
77 self.write_value_to_ptr(arg_vals[1], dest, ty)?;
80 "atomic_fence_acq" => {
81 // we are inherently singlethreaded and singlecored, this is a nop

            _ if intrinsic_name.starts_with("atomic_xchg") => {
                let ty = substs.type_at(0);
                let ptr = arg_vals[0].into_ptr(&self.memory)?;
                let change = self.value_to_primval(arg_vals[1], ty)?;
                let old = self.read_value(ptr, ty)?;
                let old = match old {
                    Value::ByVal(val) => val,
                    Value::ByRef { .. } => bug!("just read the value, can't be byref"),
                    Value::ByValPair(..) => bug!("atomic_xchg doesn't work with nonprimitives"),
                };
                self.write_primval(dest, old, ty)?;
                self.write_primval(Lvalue::from_primval_ptr(ptr), change, ty)?;
            }
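
            // Compare-and-exchange: the destination receives the pair
            // (old value, old == expected); the new value is stored only on
            // success.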
            _ if intrinsic_name.starts_with("atomic_cxchg") => {
                let ty = substs.type_at(0);
                let ptr = arg_vals[0].into_ptr(&self.memory)?;
                let expect_old = self.value_to_primval(arg_vals[1], ty)?;
                let change = self.value_to_primval(arg_vals[2], ty)?;
                let old = self.read_value(ptr, ty)?;
                let old = match old {
                    Value::ByVal(val) => val,
                    Value::ByRef { .. } => bug!("just read the value, can't be byref"),
                    Value::ByValPair(..) => bug!("atomic_cxchg doesn't work with nonprimitives"),
                };
                let (val, _) = self.binary_op(mir::BinOp::Eq, old, ty, expect_old, ty)?;
                let dest = self.force_allocation(dest)?.to_ptr()?;
                self.write_pair_to_ptr(old, val, dest, dest_ty)?;
                // cxchg must leave the location untouched when the comparison fails.
                if val.to_bool()? {
                    self.write_primval(Lvalue::from_primval_ptr(ptr), change, ty)?;
                }
            }
115 "atomic_or" | "atomic_or_acq" | "atomic_or_rel" | "atomic_or_acqrel" | "atomic_or_relaxed" |
116 "atomic_xor" | "atomic_xor_acq" | "atomic_xor_rel" | "atomic_xor_acqrel" | "atomic_xor_relaxed" |
117 "atomic_and" | "atomic_and_acq" | "atomic_and_rel" | "atomic_and_acqrel" | "atomic_and_relaxed" |
118 "atomic_xadd" | "atomic_xadd_acq" | "atomic_xadd_rel" | "atomic_xadd_acqrel" | "atomic_xadd_relaxed" |
119 "atomic_xsub" | "atomic_xsub_acq" | "atomic_xsub_rel" | "atomic_xsub_acqrel" | "atomic_xsub_relaxed" => {
120 let ty = substs.type_at(0);
121 let ptr = arg_vals[0].into_ptr(&self.memory)?;
122 let change = self.value_to_primval(arg_vals[1], ty)?;
123 let old = self.read_value(ptr, ty)?;
124 let old = match old {
125 Value::ByVal(val) => val,
126 Value::ByRef { .. } => bug!("just read the value, can't be byref"),
127 Value::ByValPair(..) => bug!("atomic_xadd_relaxed doesn't work with nonprimitives"),
129 self.write_primval(dest, old, ty)?;
130 let op = match intrinsic_name.split('_').nth(1).unwrap() {
131 "or" => mir::BinOp::BitOr,
132 "xor" => mir::BinOp::BitXor,
133 "and" => mir::BinOp::BitAnd,
134 "xadd" => mir::BinOp::Add,
135 "xsub" => mir::BinOp::Sub,
138 // FIXME: what do atomics do on overflow?
139 let (val, _) = self.binary_op(op, old, ty, change, ty)?;
140 self.write_primval(Lvalue::from_primval_ptr(ptr), val, ty)?;
143 "breakpoint" => unimplemented!(), // halt miri
146 "copy_nonoverlapping" => {
147 let elem_ty = substs.type_at(0);
148 let elem_size = self.type_size(elem_ty)?.expect("cannot copy unsized value");
149 let count = self.value_to_primval(arg_vals[2], usize)?.to_u64()?;
150 if count * elem_size != 0 {
151 // TODO: We do not even validate alignment for the 0-bytes case. libstd relies on this in vec::IntoIter::next.
152 // Also see the write_bytes intrinsic.
153 let elem_align = self.type_align(elem_ty)?;
154 let src = arg_vals[0].into_ptr(&self.memory)?;
155 let dest = arg_vals[1].into_ptr(&self.memory)?;
156 self.memory.copy(src, dest, count * elem_size, elem_align, intrinsic_name.ends_with("_nonoverlapping"))?;
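
            // Bit-twiddling intrinsics. The *_nonzero variants have an
            // undefined result on zero, which the interpreter surfaces as an
            // `Intrinsic` error.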
            "ctpop" |
            "cttz" |
            "cttz_nonzero" |
            "ctlz" |
            "ctlz_nonzero" |
            "bswap" => {
                let ty = substs.type_at(0);
                let num = self.value_to_primval(arg_vals[0], ty)?.to_bytes()?;
                let kind = self.ty_to_primval_kind(ty)?;
                let num = if intrinsic_name.ends_with("_nonzero") {
                    if num == 0 {
                        return Err(EvalError::Intrinsic(format!("{} called on 0", intrinsic_name)))
                    }
                    numeric_intrinsic(intrinsic_name.trim_right_matches("_nonzero"), num, kind)?
                } else {
                    numeric_intrinsic(intrinsic_name, num, kind)?
                };
                self.write_primval(dest, num, ty)?;
            }
180 "discriminant_value" => {
181 let ty = substs.type_at(0);
182 let adt_ptr = arg_vals[0].into_ptr(&self.memory)?.to_ptr()?;
183 let discr_val = self.read_discriminant_value(adt_ptr, ty)?;
184 self.write_primval(dest, PrimVal::Bytes(discr_val), dest_ty)?;
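
            // The unary float intrinsics simply defer to the corresponding
            // host f32/f64 methods.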
187 "sinf32" | "fabsf32" | "cosf32" |
188 "sqrtf32" | "expf32" | "exp2f32" |
189 "logf32" | "log10f32" | "log2f32" |
190 "floorf32" | "ceilf32" | "truncf32" => {
191 let f = self.value_to_primval(arg_vals[0], f32)?.to_f32()?;
192 let f = match intrinsic_name {
194 "fabsf32" => f.abs(),
196 "sqrtf32" => f.sqrt(),
198 "exp2f32" => f.exp2(),
200 "log10f32" => f.log10(),
201 "log2f32" => f.log2(),
202 "floorf32" => f.floor(),
203 "ceilf32" => f.ceil(),
204 "truncf32" => f.trunc(),
207 self.write_primval(dest, PrimVal::from_f32(f), dest_ty)?;
210 "sinf64" | "fabsf64" | "cosf64" |
211 "sqrtf64" | "expf64" | "exp2f64" |
212 "logf64" | "log10f64" | "log2f64" |
213 "floorf64" | "ceilf64" | "truncf64" => {
214 let f = self.value_to_primval(arg_vals[0], f64)?.to_f64()?;
215 let f = match intrinsic_name {
217 "fabsf64" => f.abs(),
219 "sqrtf64" => f.sqrt(),
221 "exp2f64" => f.exp2(),
223 "log10f64" => f.log10(),
224 "log2f64" => f.log2(),
225 "floorf64" => f.floor(),
226 "ceilf64" => f.ceil(),
227 "truncf64" => f.trunc(),
230 self.write_primval(dest, PrimVal::from_f64(f), dest_ty)?;
233 "fadd_fast" | "fsub_fast" | "fmul_fast" | "fdiv_fast" | "frem_fast" => {
234 let ty = substs.type_at(0);
235 let a = self.value_to_primval(arg_vals[0], ty)?;
236 let b = self.value_to_primval(arg_vals[1], ty)?;
237 let op = match intrinsic_name {
238 "fadd_fast" => mir::BinOp::Add,
239 "fsub_fast" => mir::BinOp::Sub,
240 "fmul_fast" => mir::BinOp::Mul,
241 "fdiv_fast" => mir::BinOp::Div,
242 "frem_fast" => mir::BinOp::Rem,
245 let result = self.binary_op(op, a, ty, b, ty)?;
246 self.write_primval(dest, result.0, dest_ty)?;
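
            // `init` zero-initializes the destination in place, whatever its
            // current representation (in-memory bytes, a scalar, or a pair).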
            "init" => {
                let size = self.type_size(dest_ty)?.expect("cannot zero unsized value");
                let init = |this: &mut Self, val: Value| {
                    let zero_val = match val {
                        Value::ByRef { ptr, aligned } => {
                            // These writes have no alignment restriction anyway.
                            this.memory.write_repeat(ptr, 0, size)?;
                            Value::ByRef { ptr, aligned }
                        }
                        // TODO(solson): Revisit this, it's fishy to check for Undef here.
                        Value::ByVal(PrimVal::Undef) => match this.ty_to_primval_kind(dest_ty) {
                            Ok(_) => Value::ByVal(PrimVal::Bytes(0)),
                            Err(_) => {
                                let ptr = this.alloc_ptr_with_substs(dest_ty, substs)?;
                                let ptr = Pointer::from(PrimVal::Ptr(ptr));
                                this.memory.write_repeat(ptr, 0, size)?;
                                Value::by_ref(ptr)
                            }
                        },
                        Value::ByVal(_) => Value::ByVal(PrimVal::Bytes(0)),
                        Value::ByValPair(..) =>
                            Value::ByValPair(PrimVal::Bytes(0), PrimVal::Bytes(0)),
                    };
                    Ok(zero_val)
                };
                match dest {
                    Lvalue::Local { frame, local } => self.modify_local(frame, local, init)?,
                    Lvalue::Ptr { ptr, extra: LvalueExtra::None, aligned: true } => self.memory.write_repeat(ptr, 0, size)?,
                    Lvalue::Ptr { .. } => bug!("init intrinsic tried to write to fat or unaligned ptr target"),
                    Lvalue::Global(cid) => self.modify_global(cid, init)?,
                }
            }

            "min_align_of" => {
                let elem_ty = substs.type_at(0);
                let elem_align = self.type_align(elem_ty)?;
                let align_val = PrimVal::from_u128(elem_align as u128);
                self.write_primval(dest, align_val, dest_ty)?;
            }

            "pref_align_of" => {
                let ty = substs.type_at(0);
                let layout = self.type_layout(ty)?;
                let align = layout.align(&self.tcx.data_layout).pref();
                let align_val = PrimVal::from_u128(align as u128);
                self.write_primval(dest, align_val, dest_ty)?;
            }

            "move_val_init" => {
                let ty = substs.type_at(0);
                let ptr = arg_vals[0].into_ptr(&self.memory)?;
                self.write_value_to_ptr(arg_vals[1], ptr, ty)?;
            }

            "needs_drop" => {
                let ty = substs.type_at(0);
                let env = ty::ParamEnv::empty(Reveal::All);
                let needs_drop = ty.needs_drop(self.tcx, env);
                self.write_primval(dest, PrimVal::from_bool(needs_drop), dest_ty)?;
            }
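
            // Unlike `arith_offset` above, `offset` goes through the checked
            // `pointer_offset`, so out-of-bounds arithmetic is reported as an
            // error instead of wrapping.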
            "offset" => {
                let offset = self.value_to_primval(arg_vals[1], isize)?.to_i128()? as i64;
                let ptr = arg_vals[0].into_ptr(&self.memory)?;
                let result_ptr = self.pointer_offset(ptr, substs.type_at(0), offset)?;
                self.write_ptr(dest, result_ptr, dest_ty)?;
            }
321 "overflowing_sub" => {
322 self.intrinsic_overflowing(mir::BinOp::Sub, &args[0], &args[1], dest, dest_ty)?;
325 "overflowing_mul" => {
326 self.intrinsic_overflowing(mir::BinOp::Mul, &args[0], &args[1], dest, dest_ty)?;
329 "overflowing_add" => {
330 self.intrinsic_overflowing(mir::BinOp::Add, &args[0], &args[1], dest, dest_ty)?;

            "powf32" => {
                let f = self.value_to_primval(arg_vals[0], f32)?.to_f32()?;
                let f2 = self.value_to_primval(arg_vals[1], f32)?.to_f32()?;
                self.write_primval(dest, PrimVal::from_f32(f.powf(f2)), dest_ty)?;
            }

            "powf64" => {
                let f = self.value_to_primval(arg_vals[0], f64)?.to_f64()?;
                let f2 = self.value_to_primval(arg_vals[1], f64)?.to_f64()?;
                self.write_primval(dest, PrimVal::from_f64(f.powf(f2)), dest_ty)?;
            }

            "fmaf32" => {
                let a = self.value_to_primval(arg_vals[0], f32)?.to_f32()?;
                let b = self.value_to_primval(arg_vals[1], f32)?.to_f32()?;
                let c = self.value_to_primval(arg_vals[2], f32)?.to_f32()?;
                self.write_primval(dest, PrimVal::from_f32(a * b + c), dest_ty)?;
            }

            "fmaf64" => {
                let a = self.value_to_primval(arg_vals[0], f64)?.to_f64()?;
                let b = self.value_to_primval(arg_vals[1], f64)?.to_f64()?;
                let c = self.value_to_primval(arg_vals[2], f64)?.to_f64()?;
                self.write_primval(dest, PrimVal::from_f64(a * b + c), dest_ty)?;
            }

            "powif32" => {
                let f = self.value_to_primval(arg_vals[0], f32)?.to_f32()?;
                let i = self.value_to_primval(arg_vals[1], i32)?.to_i128()?;
                self.write_primval(dest, PrimVal::from_f32(f.powi(i as i32)), dest_ty)?;
            }

            "powif64" => {
                let f = self.value_to_primval(arg_vals[0], f64)?.to_f64()?;
                let i = self.value_to_primval(arg_vals[1], i32)?.to_i128()?;
                self.write_primval(dest, PrimVal::from_f64(f.powi(i as i32)), dest_ty)?;
            }

            "size_of" => {
                let ty = substs.type_at(0);
                let size = self.type_size(ty)?.expect("size_of intrinsic called on unsized value") as u128;
                self.write_primval(dest, PrimVal::from_u128(size), dest_ty)?;
            }

            "size_of_val" => {
                let ty = substs.type_at(0);
                let (size, _) = self.size_and_align_of_dst(ty, arg_vals[0])?;
                self.write_primval(dest, PrimVal::from_u128(size as u128), dest_ty)?;
            }

            "min_align_of_val" |
            "align_of_val" => {
                let ty = substs.type_at(0);
                let (_, align) = self.size_and_align_of_dst(ty, arg_vals[0])?;
                self.write_primval(dest, PrimVal::from_u128(align as u128), dest_ty)?;
            }

            "type_name" => {
                let ty = substs.type_at(0);
                let ty_name = ty.to_string();
                let s = self.str_to_value(&ty_name)?;
                self.write_value(s, dest, dest_ty)?;
            }
            "type_id" => {
                let ty = substs.type_at(0);
                let n = self.tcx.type_id_hash(ty);
                self.write_primval(dest, PrimVal::Bytes(n as u128), dest_ty)?;
            }
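
            // `transmute` reinterprets the argument's bytes as the destination
            // type; the write deliberately skips the alignment check.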
            "transmute" => {
                let src_ty = substs.type_at(0);
                let ptr = self.force_allocation(dest)?.to_ptr()?;
                self.write_maybe_aligned_mut(/*aligned*/ false, |ectx| {
                    ectx.write_value_to_ptr(arg_vals[0], ptr.into(), src_ty)
                })?;
            }

            "unchecked_shl" => {
                let bits = self.type_size(dest_ty)?.expect("intrinsic can't be called on unsized type") as u128 * 8;
                let rhs = self.value_to_primval(arg_vals[1], substs.type_at(0))?.to_bytes()?;
                if rhs >= bits {
                    return Err(EvalError::Intrinsic(format!("Overflowing shift by {} in unchecked_shl", rhs)));
                }
                self.intrinsic_overflowing(mir::BinOp::Shl, &args[0], &args[1], dest, dest_ty)?;
            }

            "unchecked_shr" => {
                let bits = self.type_size(dest_ty)?.expect("intrinsic can't be called on unsized type") as u128 * 8;
                let rhs = self.value_to_primval(arg_vals[1], substs.type_at(0))?.to_bytes()?;
                if rhs >= bits {
                    return Err(EvalError::Intrinsic(format!("Overflowing shift by {} in unchecked_shr", rhs)));
                }
                self.intrinsic_overflowing(mir::BinOp::Shr, &args[0], &args[1], dest, dest_ty)?;
            }

            "unchecked_div" => {
                let rhs = self.value_to_primval(arg_vals[1], substs.type_at(0))?.to_bytes()?;
                if rhs == 0 {
                    return Err(EvalError::Intrinsic(format!("Division by 0 in unchecked_div")));
                }
                self.intrinsic_overflowing(mir::BinOp::Div, &args[0], &args[1], dest, dest_ty)?;
            }

            "unchecked_rem" => {
                let rhs = self.value_to_primval(arg_vals[1], substs.type_at(0))?.to_bytes()?;
                if rhs == 0 {
                    return Err(EvalError::Intrinsic(format!("Division by 0 in unchecked_rem")));
                }
                self.intrinsic_overflowing(mir::BinOp::Rem, &args[0], &args[1], dest, dest_ty)?;
            }
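
            // `uninit` is the inverse of `init`: it marks the destination's
            // bytes as undefined rather than writing any fixed value.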
            "uninit" => {
                let size = dest_layout.size(&self.tcx.data_layout).bytes();
                let uninit = |this: &mut Self, val: Value| {
                    match val {
                        Value::ByRef { ptr, aligned } => {
                            this.memory.mark_definedness(ptr, size, false)?;
                            Ok(Value::ByRef { ptr, aligned })
                        }
                        _ => Ok(Value::ByVal(PrimVal::Undef)),
                    }
                };
                match dest {
                    Lvalue::Local { frame, local } => self.modify_local(frame, local, uninit)?,
                    Lvalue::Ptr { ptr, extra: LvalueExtra::None, aligned: true } =>
                        self.memory.mark_definedness(ptr, size, false)?,
                    Lvalue::Ptr { .. } => bug!("uninit intrinsic tried to write to fat or unaligned ptr target"),
                    Lvalue::Global(cid) => self.modify_global(cid, uninit)?,
                }
            }

            "write_bytes" => {
                let u8 = self.tcx.types.u8;
                let ty = substs.type_at(0);
                let ty_align = self.type_align(ty)?;
                let val_byte = self.value_to_primval(arg_vals[1], u8)?.to_u128()? as u8;
                let size = self.type_size(ty)?.expect("write_bytes() type must be sized");
                let ptr = arg_vals[0].into_ptr(&self.memory)?;
                let count = self.value_to_primval(arg_vals[2], usize)?.to_u64()?;
                if count > 0 {
                    // HashMap relies on write_bytes on a NULL ptr with count == 0 to work
                    // TODO: Should we, at least, validate the alignment? (Also see the copy intrinsic)
                    self.memory.check_align(ptr, ty_align)?;
                    self.memory.write_repeat(ptr, val_byte, size * count)?;
                }
            }

            name => return Err(EvalError::Unimplemented(format!("unimplemented intrinsic: {}", name))),
        }

        self.goto_block(target);

        // Since we pushed no stack frame, the main loop will act
        // as if the call just completed and it's returning to the
        // current frame.
        Ok(())
    }
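
    /// Computes the size and alignment of the value a pointer of type `ty`
    /// points to, consulting `value` (a slice length or a vtable pointer)
    /// when `ty` is dynamically sized.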
    pub fn size_and_align_of_dst(
        &mut self,
        ty: ty::Ty<'tcx>,
        value: Value,
    ) -> EvalResult<'tcx, (u64, u64)> {
        if let Some(size) = self.type_size(ty)? {
            Ok((size as u64, self.type_align(ty)? as u64))
        } else {
            match ty.sty {
                ty::TyAdt(def, substs) => {
                    // First get the size of all statically known fields.
                    // Don't use type_of::sizing_type_of because that expects t to be sized,
                    // and it also rounds up to alignment, which we want to avoid,
                    // as the unsized field's alignment could be smaller.
                    assert!(!ty.is_simd());
                    let layout = self.type_layout(ty)?;
                    debug!("DST {} layout: {:?}", ty, layout);

                    let (sized_size, sized_align) = match *layout {
                        ty::layout::Layout::Univariant { ref variant, .. } => {
                            (variant.offsets.last().map_or(0, |o| o.bytes()), variant.align)
                        }
                        _ => {
                            bug!("size_and_align_of_dst: expected Univariant for `{}`, found {:#?}",
                                 ty, layout);
                        }
                    };

                    debug!("DST {} statically sized prefix size: {} align: {:?}",
                           ty, sized_size, sized_align);

                    // Recurse to get the size of the dynamically sized field (must be
                    // the last field).
                    let last_field = def.struct_variant().fields.last().unwrap();
                    let field_ty = self.field_ty(substs, last_field);
                    let (unsized_size, unsized_align) = self.size_and_align_of_dst(field_ty, value)?;

                    // FIXME (#26403, #27023): We should be adding padding
                    // to `sized_size` (to accommodate the `unsized_align`
                    // required of the unsized field that follows) before
                    // summing it with `sized_size`. (Note that since #26403
                    // is unfixed, we do not yet add the necessary padding
                    // here. But this is where the add would go.)

                    // Return the sum of sizes and max of aligns.
                    let size = sized_size + unsized_size;

                    // Choose max of two known alignments (combined value must
                    // be aligned according to more restrictive of the two).
                    let align = sized_align.max(Align::from_bytes(unsized_align, unsized_align).unwrap());

                    // Issue #27023: must add any necessary padding to `size`
                    // (to make it a multiple of `align`) before returning it.
                    //
                    // Namely, the returned size should be, in C notation:
                    //
                    //   `size + ((size & (align-1)) ? align : 0)`
                    //
                    // emulated via the semi-standard fast bit trick:
                    //
                    //   `(size + (align-1)) & -align`
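                    //
                    // For example, size 13 with align 8 rounds up to
                    // (13 + 7) & !7 = 16.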

                    let size = Size::from_bytes(size).abi_align(align).bytes();
                    Ok((size, align.abi()))
                }
                ty::TyDynamic(..) => {
                    let (_, vtable) = value.into_ptr_vtable_pair(&self.memory)?;
                    // The second entry in the vtable is the dynamic size of the object.
                    self.read_size_and_align_from_vtable(vtable)
                }

                ty::TySlice(_) | ty::TyStr => {
                    let elem_ty = ty.sequence_element_type(self.tcx);
                    let elem_size = self.type_size(elem_ty)?.expect("slice element must be sized") as u64;
                    let (_, len) = value.into_slice(&self.memory)?;
                    let align = self.type_align(elem_ty)?;
                    Ok((len * elem_size, align as u64))
                }

                _ => bug!("size_of_val::<{:?}>", ty),
            }
        }
    }

    /// Returns the normalized type of a struct field
    fn field_ty(
        &self,
        param_substs: &Substs<'tcx>,
        f: &ty::FieldDef,
    ) -> ty::Ty<'tcx> {
        self.tcx.normalize_associated_type(&f.ty(self.tcx, param_substs))
    }
}
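
/// Applies one of the numeric intrinsics (`bswap`, `ctlz`, `ctpop`, `cttz`)
/// to `bytes`, interpreting them at the integer width given by `kind`.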
fn numeric_intrinsic<'tcx>(
    name: &str,
    bytes: u128,
    kind: PrimValKind,
) -> EvalResult<'tcx, PrimVal> {
    macro_rules! integer_intrinsic {
        ($method:ident) => ({
            use interpret::PrimValKind::*;
            let result_bytes = match kind {
                I8 => (bytes as i8).$method() as u128,
                U8 => (bytes as u8).$method() as u128,
                I16 => (bytes as i16).$method() as u128,
                U16 => (bytes as u16).$method() as u128,
                I32 => (bytes as i32).$method() as u128,
                U32 => (bytes as u32).$method() as u128,
                I64 => (bytes as i64).$method() as u128,
                U64 => (bytes as u64).$method() as u128,
                I128 => (bytes as i128).$method() as u128,
                U128 => bytes.$method() as u128,
                _ => bug!("invalid `{}` argument: {:?}", name, bytes),
            };

            PrimVal::Bytes(result_bytes)
        });
    }

    let result_val = match name {
        "bswap" => integer_intrinsic!(swap_bytes),
        "ctlz" => integer_intrinsic!(leading_zeros),
        "ctpop" => integer_intrinsic!(count_ones),
        "cttz" => integer_intrinsic!(trailing_zeros),
        _ => bug!("not a numeric intrinsic: {}", name),
    };

    Ok(result_val)
}