2 use rustc::traits::Reveal;
3 use rustc::ty::layout::TyLayout;
6 use rustc::mir::interpret::{EvalResult, Lvalue, LvalueExtra, PrimVal, PrimValKind, Value, Pointer,
7 HasMemory, AccessKind, EvalContext, PtrAndAlign, ValTy};
9 use helpers::EvalContextExt as HelperEvalContextExt;
// Extension trait adding intrinsic-evaluation support to `EvalContext`.
// NOTE(review): this excerpt is non-contiguous (the embedded original line
// numbers jump from 11 to 14 to 17), so the `fn` name line and some parameters
// of the single trait method are missing from view. The visible parameters
// show it takes the `ty::Instance` of the intrinsic being called, the layout
// of the destination, and the basic block to jump to afterwards.
11 pub trait EvalContextExt<'tcx> {
14 instance: ty::Instance<'tcx>,
17 dest_layout: TyLayout<'tcx>,
18 target: mir::BasicBlock,
19 ) -> EvalResult<'tcx>;
// Implementation of intrinsic evaluation for miri's `EvalContext`.
// NOTE(review): this excerpt is non-contiguous — the original file's own line
// numbers (the leading integers) show gaps, so the method's name/signature
// line, many match-arm labels, and closing braces are not visible here.
// Comments below describe only what the visible lines establish; arm labels
// inferred from context are marked as presumed.
22 impl<'a, 'tcx> EvalContextExt<'tcx> for EvalContext<'a, 'tcx, super::Evaluator> {
// Visible tail of the method signature: the instance identifies which
// intrinsic is called; `target` is the block to continue at on success.
25 instance: ty::Instance<'tcx>,
28 dest_layout: TyLayout<'tcx>,
29 target: mir::BasicBlock,
30 ) -> EvalResult<'tcx> {
31 let substs = instance.substs;
// Intrinsics are dispatched purely by item name.
33 let intrinsic_name = &self.tcx.item_name(instance.def_id())[..];
34 match intrinsic_name {
// Presumably the `align_offset` arm (label line missing): always reports
// "cannot align" by writing u128::MAX, per the FIXME below.
36 // FIXME: return a real value in case the target allocation has an
37 // alignment bigger than the one requested
38 self.write_primval(dest, PrimVal::Bytes(u128::max_value()), dest_layout.ty)?;
// Checked arithmetic intrinsics: delegate to `intrinsic_with_overflow`
// (argument lines missing from excerpt).
41 "add_with_overflow" => {
42 self.intrinsic_with_overflow(
51 "sub_with_overflow" => {
52 self.intrinsic_with_overflow(
61 "mul_with_overflow" => {
62 self.intrinsic_with_overflow(
// Presumably `arith_offset` (label missing): wrapping pointer offset by a
// signed element count.
72 let offset = self.value_to_primval(args[1])?.to_i128()? as i64;
73 let ptr = args[0].into_ptr(&self.memory)?;
74 let result_ptr = self.wrapping_pointer_offset(ptr, substs.type_at(0), offset)?;
75 self.write_ptr(dest, result_ptr, dest_layout.ty)?;
// Presumably `assume` (label missing): evaluating `assume(false)` is an error.
79 let cond = self.value_to_primval(args[0])?.to_bool()?;
81 return err!(AssumptionNotHeld);
// Atomic/volatile loads (other ordering variants of the `|` chain are
// missing from the excerpt): read by reference from the pointer argument.
86 "atomic_load_relaxed" |
89 let ptr = args[0].into_ptr(&self.memory)?;
91 value: Value::by_ref(ptr),
92 ty: substs.type_at(0),
94 self.write_value(valty, dest)?;
// Atomic/volatile stores: write args[1] through the pointer in args[0].
98 "atomic_store_relaxed" |
100 "volatile_store" => {
101 let ty = substs.type_at(0);
102 let dest = args[0].into_ptr(&self.memory)?;
103 self.write_value_to_ptr(args[1].value, dest, ty)?;
// Fences are no-ops in this single-threaded interpreter.
106 "atomic_fence_acq" => {
107 // we are inherently singlethreaded and singlecored, this is a nop
// atomic_xchg*: read old value, return it in `dest`, then (on the missing
// lines) write `change` back through the pointer.
110 _ if intrinsic_name.starts_with("atomic_xchg") => {
111 let ty = substs.type_at(0);
112 let ptr = args[0].into_ptr(&self.memory)?;
113 let change = self.value_to_primval(args[1])?;
114 let old = self.read_value(ptr, ty)?;
115 let old = match old {
116 Value::ByVal(val) => val,
117 Value::ByRef { .. } => bug!("just read the value, can't be byref"),
118 Value::ByValPair(..) => bug!("atomic_xchg doesn't work with nonprimitives"),
120 self.write_primval(dest, old, ty)?;
122 Lvalue::from_primval_ptr(ptr),
// atomic_cxchg*: compare-exchange; the returned pair is (old value,
// success flag from the `Eq` comparison).
128 _ if intrinsic_name.starts_with("atomic_cxchg") => {
129 let ty = substs.type_at(0);
130 let ptr = args[0].into_ptr(&self.memory)?;
131 let expect_old = self.value_to_primval(args[1])?;
132 let change = self.value_to_primval(args[2])?;
133 let old = self.read_value(ptr, ty)?;
134 let old = match old {
135 Value::ByVal(val) => val,
136 Value::ByRef { .. } => bug!("just read the value, can't be byref"),
137 Value::ByValPair(..) => bug!("atomic_cxchg doesn't work with nonprimitives"),
139 let (val, _) = self.binary_op(mir::BinOp::Eq, old, ty, expect_old, ty)?;
141 value: Value::ByValPair(old, val),
144 self.write_value(valty, dest)?;
146 Lvalue::from_primval_ptr(ptr),
// Atomic read-modify-write family (or/xor/and/xadd/xsub; many ordering
// variants of the `|` chain are missing from the excerpt). Returns the old
// value and stores `old <op> change` back through the pointer.
156 "atomic_or_relaxed" |
160 "atomic_xor_acqrel" |
161 "atomic_xor_relaxed" |
165 "atomic_and_acqrel" |
166 "atomic_and_relaxed" |
170 "atomic_xadd_acqrel" |
171 "atomic_xadd_relaxed" |
175 "atomic_xsub_acqrel" |
176 "atomic_xsub_relaxed" => {
177 let ty = substs.type_at(0);
178 let ptr = args[0].into_ptr(&self.memory)?;
179 let change = self.value_to_primval(args[1])?;
180 let old = self.read_value(ptr, ty)?;
181 let old = match old {
182 Value::ByVal(val) => val,
183 Value::ByRef { .. } => bug!("just read the value, can't be byref"),
184 Value::ByValPair(..) => {
185 bug!("atomic_xadd_relaxed doesn't work with nonprimitives")
188 self.write_primval(dest, old, ty)?;
// The operation is encoded in the second `_`-separated name component,
// e.g. "atomic_xadd_relaxed" -> "xadd" -> Add.
189 let op = match intrinsic_name.split('_').nth(1).unwrap() {
190 "or" => mir::BinOp::BitOr,
191 "xor" => mir::BinOp::BitXor,
192 "and" => mir::BinOp::BitAnd,
193 "xadd" => mir::BinOp::Add,
194 "xsub" => mir::BinOp::Sub,
197 // FIXME: what do atomics do on overflow?
198 let (val, _) = self.binary_op(op, old, ty, change, ty)?;
199 self.write_primval(Lvalue::from_primval_ptr(ptr), val, ty)?;
202 "breakpoint" => unimplemented!(), // halt miri
// copy / copy_nonoverlapping (the `"copy" |` label line appears to be
// missing): memmove/memcpy semantics; the zero-size case skips all checks.
205 "copy_nonoverlapping" => {
206 let elem_ty = substs.type_at(0);
207 let elem_size = self.type_size(elem_ty)?.expect("cannot copy unsized value");
208 let count = self.value_to_primval(args[2])?.to_u64()?;
209 if count * elem_size != 0 {
210 // TODO: We do not even validate alignment for the 0-bytes case. libstd relies on this in vec::IntoIter::next.
211 // Also see the write_bytes intrinsic.
212 let elem_align = self.type_align(elem_ty)?;
213 let src = args[0].into_ptr(&self.memory)?;
214 let dest = args[1].into_ptr(&self.memory)?;
220 intrinsic_name.ends_with("_nonoverlapping"),
// Bit-counting / byte-swap intrinsics, delegated to the free function
// `numeric_intrinsic` below; the `_nonzero` variants reject 0 first.
225 "ctpop" | "cttz" | "cttz_nonzero" | "ctlz" | "ctlz_nonzero" | "bswap" => {
226 let ty = substs.type_at(0);
227 let num = self.value_to_primval(args[0])?.to_bytes()?;
228 let kind = self.ty_to_primval_kind(ty)?;
229 let num = if intrinsic_name.ends_with("_nonzero") {
231 return err!(Intrinsic(format!("{} called on 0", intrinsic_name)));
233 numeric_intrinsic(intrinsic_name.trim_right_matches("_nonzero"), num, kind)?
235 numeric_intrinsic(intrinsic_name, num, kind)?
237 self.write_primval(dest, num, ty)?;
// Read an enum discriminant through the pointer argument.
240 "discriminant_value" => {
241 let ty = substs.type_at(0);
242 let adt_ptr = args[0].into_ptr(&self.memory)?;
243 let lval = Lvalue::from_primval_ptr(adt_ptr);
244 let discr_val = self.read_discriminant_value(lval, ty)?;
245 self.write_primval(dest, PrimVal::Bytes(discr_val), dest_layout.ty)?;
// f32 unary math intrinsics: round-trip the raw bits through a host f32
// and apply the matching std float method (some arms missing from excerpt).
248 "sinf32" | "fabsf32" | "cosf32" | "sqrtf32" | "expf32" | "exp2f32" | "logf32" |
249 "log10f32" | "log2f32" | "floorf32" | "ceilf32" | "truncf32" => {
250 let f = self.value_to_primval(args[0])?.to_bytes()?;
251 let f = f32::from_bits(f as u32);
252 let f = match intrinsic_name {
254 "fabsf32" => f.abs(),
256 "sqrtf32" => f.sqrt(),
258 "exp2f32" => f.exp2(),
260 "log10f32" => f.log10(),
261 "log2f32" => f.log2(),
262 "floorf32" => f.floor(),
263 "ceilf32" => f.ceil(),
264 "truncf32" => f.trunc(),
267 self.write_primval(dest, PrimVal::Bytes(f.to_bits() as u128), dest_layout.ty)?;
// f64 unary math intrinsics: same scheme at 64-bit width.
270 "sinf64" | "fabsf64" | "cosf64" | "sqrtf64" | "expf64" | "exp2f64" | "logf64" |
271 "log10f64" | "log2f64" | "floorf64" | "ceilf64" | "truncf64" => {
272 let f = self.value_to_primval(args[0])?.to_bytes()?;
273 let f = f64::from_bits(f as u64);
274 let f = match intrinsic_name {
276 "fabsf64" => f.abs(),
278 "sqrtf64" => f.sqrt(),
280 "exp2f64" => f.exp2(),
282 "log10f64" => f.log10(),
283 "log2f64" => f.log2(),
284 "floorf64" => f.floor(),
285 "ceilf64" => f.ceil(),
286 "truncf64" => f.trunc(),
289 self.write_primval(dest, PrimVal::Bytes(f.to_bits() as u128), dest_layout.ty)?;
// "Fast" float arithmetic: evaluated as the ordinary binary op (miri does
// not model the UB-on-non-finite semantics here, as far as visible).
292 "fadd_fast" | "fsub_fast" | "fmul_fast" | "fdiv_fast" | "frem_fast" => {
293 let ty = substs.type_at(0);
294 let a = self.value_to_primval(args[0])?;
295 let b = self.value_to_primval(args[1])?;
296 let op = match intrinsic_name {
297 "fadd_fast" => mir::BinOp::Add,
298 "fsub_fast" => mir::BinOp::Sub,
299 "fmul_fast" => mir::BinOp::Mul,
300 "fdiv_fast" => mir::BinOp::Div,
301 "frem_fast" => mir::BinOp::Rem,
304 let result = self.binary_op(op, a, ty, b, ty)?;
305 self.write_primval(dest, result.0, dest_layout.ty)?;
// Pure hints / no-ops in the interpreter.
308 "likely" | "unlikely" | "forget" => {}
// Presumably the `init` intrinsic (label missing): zero-fill the
// destination, handling all three `Value` representations.
311 let size = self.type_size(dest_layout.ty)?.expect("cannot zero unsized value");
312 let init = |this: &mut Self, val: Value| {
313 let zero_val = match val {
314 Value::ByRef(PtrAndAlign { ptr, .. }) => {
315 // These writes have no alignment restriction anyway.
316 this.memory.write_repeat(ptr, 0, size)?;
319 // TODO(solson): Revisit this, it's fishy to check for Undef here.
320 Value::ByVal(PrimVal::Undef) => {
321 match this.ty_to_primval_kind(dest_layout.ty) {
322 Ok(_) => Value::ByVal(PrimVal::Bytes(0)),
// Non-primitive type: allocate backing memory and zero it there.
324 let ptr = this.alloc_ptr_with_substs(dest_layout.ty, substs)?;
325 let ptr = Pointer::from(PrimVal::Ptr(ptr));
326 this.memory.write_repeat(ptr, 0, size)?;
331 Value::ByVal(_) => Value::ByVal(PrimVal::Bytes(0)),
332 Value::ByValPair(..) => {
333 Value::ByValPair(PrimVal::Bytes(0), PrimVal::Bytes(0))
// Dispatch on where `dest` lives: a local is updated via `modify_local`,
// an aligned thin pointer is zeroed directly; anything else is a bug.
339 Lvalue::Local { frame, local } => self.modify_local(frame, local, init)?,
341 ptr: PtrAndAlign { ptr, aligned: true },
342 extra: LvalueExtra::None,
343 } => self.memory.write_repeat(ptr, 0, size)?,
344 Lvalue::Ptr { .. } => {
345 bug!("init intrinsic tried to write to fat or unaligned ptr target")
// Presumably `min_align_of` (label missing): ABI alignment of the type.
351 let elem_ty = substs.type_at(0);
352 let elem_align = self.type_align(elem_ty)?;
353 let align_val = PrimVal::from_u128(elem_align as u128);
354 self.write_primval(dest, align_val, dest_layout.ty)?;
// Presumably `pref_align_of` (label missing): preferred alignment.
358 let ty = substs.type_at(0);
359 let layout = self.type_layout(ty)?;
360 let align = layout.align.pref();
361 let align_val = PrimVal::from_u128(align as u128);
362 self.write_primval(dest, align_val, dest_layout.ty)?;
// Presumably `move_val_init` (label missing): store args[1] through args[0].
366 let ty = substs.type_at(0);
367 let ptr = args[0].into_ptr(&self.memory)?;
368 self.write_value_to_ptr(args[1].value, ptr, ty)?;
// Presumably `needs_drop` (label missing): query drop glue requirement with
// all specializations revealed.
372 let ty = substs.type_at(0);
373 let env = ty::ParamEnv::empty(Reveal::All);
374 let needs_drop = ty.needs_drop(self.tcx, env);
377 PrimVal::from_bool(needs_drop),
// Presumably `offset` (label missing): checked (non-wrapping) pointer
// offset, unlike the `wrapping_pointer_offset` arm above.
383 let offset = self.value_to_primval(args[1])?.to_i128()? as i64;
384 let ptr = args[0].into_ptr(&self.memory)?;
385 let result_ptr = self.pointer_offset(ptr, substs.type_at(0), offset)?;
386 self.write_ptr(dest, result_ptr, dest_layout.ty)?;
// Wrapping arithmetic intrinsics (argument lines missing from excerpt).
389 "overflowing_sub" => {
390 self.intrinsic_overflowing(
399 "overflowing_mul" => {
400 self.intrinsic_overflowing(
409 "overflowing_add" => {
410 self.intrinsic_overflowing(
// Presumably `powf32` (label missing).
420 let f = self.value_to_primval(args[0])?.to_bytes()?;
421 let f = f32::from_bits(f as u32);
422 let f2 = self.value_to_primval(args[1])?.to_bytes()?;
423 let f2 = f32::from_bits(f2 as u32);
426 PrimVal::Bytes(f.powf(f2).to_bits() as u128),
// Presumably `powf64` (label missing).
432 let f = self.value_to_primval(args[0])?.to_bytes()?;
433 let f = f64::from_bits(f as u64);
434 let f2 = self.value_to_primval(args[1])?.to_bytes()?;
435 let f2 = f64::from_bits(f2 as u64);
438 PrimVal::Bytes(f.powf(f2).to_bits() as u128),
// Presumably `fmaf32` (label missing). NOTE(review): computed as
// `a * b + c` with intermediate rounding, not a true fused multiply-add —
// confirm whether that was intentional in the original.
444 let a = self.value_to_primval(args[0])?.to_bytes()?;
445 let a = f32::from_bits(a as u32);
446 let b = self.value_to_primval(args[1])?.to_bytes()?;
447 let b = f32::from_bits(b as u32);
448 let c = self.value_to_primval(args[2])?.to_bytes()?;
449 let c = f32::from_bits(c as u32);
452 PrimVal::Bytes((a * b + c).to_bits() as u128),
// Presumably `fmaf64` (label missing); same rounding caveat as above.
458 let a = self.value_to_primval(args[0])?.to_bytes()?;
459 let a = f64::from_bits(a as u64);
460 let b = self.value_to_primval(args[1])?.to_bytes()?;
461 let b = f64::from_bits(b as u64);
462 let c = self.value_to_primval(args[2])?.to_bytes()?;
463 let c = f64::from_bits(c as u64);
466 PrimVal::Bytes((a * b + c).to_bits() as u128),
// Presumably `powif32` (label missing): note the exponent is narrowed
// i128 -> i32 with `as`.
472 let f = self.value_to_primval(args[0])?.to_bytes()?;
473 let f = f32::from_bits(f as u32);
474 let i = self.value_to_primval(args[1])?.to_i128()?;
477 PrimVal::Bytes(f.powi(i as i32).to_bits() as u128),
// Presumably `powif64` (label missing).
483 let f = self.value_to_primval(args[0])?.to_bytes()?;
484 let f = f64::from_bits(f as u64);
485 let i = self.value_to_primval(args[1])?.to_i128()?;
488 PrimVal::Bytes(f.powi(i as i32).to_bits() as u128),
// Presumably `size_of` (label missing): statically sized types only.
494 let ty = substs.type_at(0);
495 let size = self.type_size(ty)?.expect(
496 "size_of intrinsic called on unsized value",
498 self.write_primval(dest, PrimVal::from_u128(size), dest_layout.ty)?;
// Presumably `size_of_val` (label missing): handles DSTs via
// `size_and_align_of_dst`.
502 let ty = substs.type_at(0);
503 let (size, _) = self.size_and_align_of_dst(ty, args[0].value)?;
506 PrimVal::from_u128(size.bytes() as u128),
// Presumably `min_align_of_val`/`align_of_val` (label missing).
513 let ty = substs.type_at(0);
514 let (_, align) = self.size_and_align_of_dst(ty, args[0].value)?;
517 PrimVal::from_u128(align.abi() as u128),
// Presumably `type_name` (label missing): materialize the type's string
// representation as a value.
523 let ty = substs.type_at(0);
524 let ty_name = ty.to_string();
525 let value = self.str_to_value(&ty_name)?;
526 self.write_value(ValTy { value, ty: dest_layout.ty }, dest)?;
// Presumably `type_id` (label missing): the tcx type-id hash.
529 let ty = substs.type_at(0);
530 let n = self.tcx.type_id_hash(ty);
531 self.write_primval(dest, PrimVal::Bytes(n as u128), dest_layout.ty)?;
// Presumably `transmute` (label missing): force the destination into
// memory and reinterpret the source bytes in place.
535 let src_ty = substs.type_at(0);
536 let ptr = self.force_allocation(dest)?.to_ptr()?;
537 self.write_maybe_aligned_mut(
541 ectx.write_value_to_ptr(args[0].value, ptr.into(), src_ty)
// Presumably `unchecked_shl` (label missing): a shift amount >= bit width
// is reported as an error before delegating to `intrinsic_overflowing`.
547 let bits = self.type_size(dest_layout.ty)?.expect(
548 "intrinsic can't be called on unsized type",
550 let rhs = self.value_to_primval(args[1])?
553 return err!(Intrinsic(
554 format!("Overflowing shift by {} in unchecked_shl", rhs),
557 self.intrinsic_overflowing(
// Presumably `unchecked_shr` (label missing): same out-of-range check.
567 let bits = self.type_size(dest_layout.ty)?.expect(
568 "intrinsic can't be called on unsized type",
570 let rhs = self.value_to_primval(args[1])?
573 return err!(Intrinsic(
574 format!("Overflowing shift by {} in unchecked_shr", rhs),
577 self.intrinsic_overflowing(
// Presumably `unchecked_div` (label missing): divide-by-zero is an error.
587 let rhs = self.value_to_primval(args[1])?
590 return err!(Intrinsic(format!("Division by 0 in unchecked_div")));
592 self.intrinsic_overflowing(
// Presumably `unchecked_rem` (label missing): same zero check.
602 let rhs = self.value_to_primval(args[1])?
605 return err!(Intrinsic(format!("Division by 0 in unchecked_rem")));
607 self.intrinsic_overflowing(
// Presumably the `uninit` intrinsic (label missing): mark the destination
// bytes undefined; mirrors the `init` arm's lvalue dispatch above.
617 let size = dest_layout.size.bytes();
618 let uninit = |this: &mut Self, val: Value| match val {
619 Value::ByRef(PtrAndAlign { ptr, .. }) => {
620 this.memory.mark_definedness(ptr, size, false)?;
623 _ => Ok(Value::ByVal(PrimVal::Undef)),
626 Lvalue::Local { frame, local } => self.modify_local(frame, local, uninit)?,
628 ptr: PtrAndAlign { ptr, aligned: true },
629 extra: LvalueExtra::None,
630 } => self.memory.mark_definedness(ptr, size, false)?,
631 Lvalue::Ptr { .. } => {
632 bug!("uninit intrinsic tried to write to fat or unaligned ptr target")
// Presumably `write_bytes` (label missing): memset `count` elements of `ty`
// with `val_byte`, after an explicit alignment check.
638 let ty = substs.type_at(0);
639 let ty_align = self.type_align(ty)?;
640 let val_byte = self.value_to_primval(args[1])?.to_u128()? as u8;
641 let size = self.type_size(ty)?.expect(
642 "write_bytes() type must be sized",
644 let ptr = args[0].into_ptr(&self.memory)?;
645 let count = self.value_to_primval(args[2])?.to_u64()?;
647 // HashMap relies on write_bytes on a NULL ptr with count == 0 to work
648 // TODO: Should we, at least, validate the alignment? (Also see the copy intrinsic)
649 self.memory.check_align(ptr, ty_align, Some(AccessKind::Write))?;
650 self.memory.write_repeat(ptr, val_byte, size * count)?;
// Any intrinsic not handled above is reported as unimplemented.
654 name => return err!(Unimplemented(format!("unimplemented intrinsic: {}", name))),
// Intrinsics never push a stack frame, so just continue at `target`.
657 self.goto_block(target);
659 // Since we pushed no stack frame, the main loop will act
660 // as if the call just completed and it's returning to the
// Evaluates the bit-level numeric intrinsics (bswap/ctlz/ctpop/cttz) on a raw
// u128 payload, interpreting it at the width given by `kind`.
// NOTE(review): this excerpt is incomplete — the parameter list (original
// lines 667-669) and the closing lines of the `match`/function are missing
// from view; visible context shows parameters named `name`, `bytes`, `kind`.
666 fn numeric_intrinsic<'tcx>(
670 ) -> EvalResult<'tcx, PrimVal> {
// Expands to a match on the primval kind, casting `bytes` to the concrete
// integer type so the inherent `$method` (swap_bytes etc.) sees the right
// width, then widening the result back to u128.
671 macro_rules! integer_intrinsic {
672 ($method:ident) => ({
673 use rustc::mir::interpret::PrimValKind::*;
674 let result_bytes = match kind {
675 I8 => (bytes as i8).$method() as u128,
676 U8 => (bytes as u8).$method() as u128,
677 I16 => (bytes as i16).$method() as u128,
678 U16 => (bytes as u16).$method() as u128,
679 I32 => (bytes as i32).$method() as u128,
680 U32 => (bytes as u32).$method() as u128,
681 I64 => (bytes as i64).$method() as u128,
682 U64 => (bytes as u64).$method() as u128,
683 I128 => (bytes as i128).$method() as u128,
684 U128 => bytes.$method() as u128,
// Non-integer kinds (floats, pointers, bool, char) are invalid here.
685 _ => bug!("invalid `{}` argument: {:?}", name, bytes),
688 PrimVal::Bytes(result_bytes)
// Map each intrinsic name to the corresponding std integer method.
692 let result_val = match name {
693 "bswap" => integer_intrinsic!(swap_bytes),
694 "ctlz" => integer_intrinsic!(leading_zeros),
695 "ctpop" => integer_intrinsic!(count_ones),
696 "cttz" => integer_intrinsic!(trailing_zeros),
697 _ => bug!("not a numeric intrinsic: {}", name),