use rustc::mir;
use rustc::ty;
use rustc::ty::layout::{TyLayout, LayoutOf, Size, Primitive, Integer::*};

use rustc::mir::interpret::{EvalResult, Scalar, Value, ScalarMaybeUndef};
use rustc_mir::interpret::{Place, PlaceExtra, HasMemory, EvalContext, ValTy};

use helpers::EvalContextExt as HelperEvalContextExt;
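
/// Emulation of rustc's compiler intrinsics: `call_intrinsic` evaluates a
/// single intrinsic call and then jumps to `target`, the block where
/// execution continues.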
pub trait EvalContextExt<'tcx> {
    fn call_intrinsic(
        &mut self,
        instance: ty::Instance<'tcx>,
        args: &[ValTy<'tcx>],
        dest: Place,
        dest_layout: TyLayout<'tcx>,
        target: mir::BasicBlock,
    ) -> EvalResult<'tcx>;
}

impl<'a, 'mir, 'tcx> EvalContextExt<'tcx> for EvalContext<'a, 'mir, 'tcx, super::Evaluator<'tcx>> {
    fn call_intrinsic(
        &mut self,
        instance: ty::Instance<'tcx>,
        args: &[ValTy<'tcx>],
        dest: Place,
        dest_layout: TyLayout<'tcx>,
        target: mir::BasicBlock,
    ) -> EvalResult<'tcx> {
        let substs = instance.substs;

        let intrinsic_name = &self.tcx.item_name(instance.def_id()).as_str()[..];
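        // Dispatch on the intrinsic's name; anything not handled below falls
        // through to the `Unimplemented` error at the end of this match.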
        match intrinsic_name {
36 "add_with_overflow" => {
37 self.intrinsic_with_overflow(
46 "sub_with_overflow" => {
47 self.intrinsic_with_overflow(
56 "mul_with_overflow" => {
57 self.intrinsic_with_overflow(
            "arith_offset" => {
                let offset = self.value_to_isize(args[1])?;
                let ptr = self.into_ptr(args[0].value)?.unwrap_or_err()?;
                let result_ptr = self.wrapping_pointer_offset(ptr, substs.type_at(0), offset)?;
                self.write_ptr(dest, result_ptr, dest_layout.ty)?;
            }

            "assume" => {
                let cond = self.value_to_scalar(args[0])?.to_bool()?;
                if !cond {
                    return err!(AssumptionNotHeld);
                }
            }

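            // Miri is single-threaded, so atomic orderings don't matter: the
            // atomic loads and stores below are plain memory accesses.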
81 "atomic_load_relaxed" |
84 let ptr = self.into_ptr(args[0].value)?.unwrap_or_err()?;
85 let align = self.layout_of(args[0].ty)?.align;
88 value: Value::ByRef(ptr, align),
89 ty: substs.type_at(0),
91 self.write_value(valty, dest)?;
95 "atomic_store_relaxed" |
98 let ty = substs.type_at(0);
99 let align = self.layout_of(ty)?.align;
100 let dest = self.into_ptr(args[0].value)?.unwrap_or_err()?;
101 self.write_value_to_ptr(args[1].value, dest, align, ty)?;
104 "atomic_fence_acq" => {
105 // we are inherently singlethreaded and singlecored, this is a nop
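            // `atomic_xchg*`: write the new value and return the old one.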
            _ if intrinsic_name.starts_with("atomic_xchg") => {
                let ty = substs.type_at(0);
                let align = self.layout_of(ty)?.align;
                let ptr = self.into_ptr(args[0].value)?.unwrap_or_err()?;
                let change = self.value_to_scalar(args[1])?;
                let old = self.read_value(ptr, align, ty)?;
                let old = match old {
                    Value::Scalar(val) => val,
                    Value::ByRef { .. } => bug!("just read the value, can't be byref"),
                    Value::ScalarPair(..) => bug!("atomic_xchg doesn't work with nonprimitives"),
                };
                self.write_scalar(dest, old, ty)?;
                self.write_scalar(
                    Place::from_scalar_ptr(ptr.into(), align),
                    change,
                    ty,
                )?;
            }

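            // `atomic_cxchg*`: compare-and-swap, returning an (old_value, success) pair.
            // NB: the new value is written back unconditionally here; a real
            // compare-exchange only stores on success.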
            _ if intrinsic_name.starts_with("atomic_cxchg") => {
                let ty = substs.type_at(0);
                let align = self.layout_of(ty)?.align;
                let ptr = self.into_ptr(args[0].value)?.unwrap_or_err()?;
                let expect_old = self.value_to_scalar(args[1])?;
                let change = self.value_to_scalar(args[2])?;
                let old = self.read_value(ptr, align, ty)?;
                let old = match old {
                    Value::Scalar(val) => val.unwrap_or_err()?,
                    Value::ByRef { .. } => bug!("just read the value, can't be byref"),
                    Value::ScalarPair(..) => bug!("atomic_cxchg doesn't work with nonprimitives"),
                };
                let (val, _) = self.binary_op(mir::BinOp::Eq, old, ty, expect_old, ty)?;
                let valty = ValTy {
                    value: Value::ScalarPair(old.into(), val.into()),
                    ty: dest_layout.ty,
                };
                self.write_value(valty, dest)?;
                self.write_scalar(
                    Place::from_scalar_ptr(ptr.into(), dest_layout.align),
                    change,
                    ty,
                )?;
            }

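            // Read-modify-write atomics: or/xor/and/xadd/xsub, in all
            // memory-ordering variants.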
156 "atomic_or_relaxed" |
160 "atomic_xor_acqrel" |
161 "atomic_xor_relaxed" |
165 "atomic_and_acqrel" |
166 "atomic_and_relaxed" |
170 "atomic_xadd_acqrel" |
171 "atomic_xadd_relaxed" |
175 "atomic_xsub_acqrel" |
176 "atomic_xsub_relaxed" => {
                let ty = substs.type_at(0);
                let align = self.layout_of(ty)?.align;
                let ptr = self.into_ptr(args[0].value)?.unwrap_or_err()?;
                let change = self.value_to_scalar(args[1])?;
                let old = self.read_value(ptr, align, ty)?;
                let old = match old {
                    Value::Scalar(val) => val,
                    Value::ByRef { .. } => bug!("just read the value, can't be byref"),
                    Value::ScalarPair(..) => {
                        bug!("atomic operations don't work with nonprimitives")
                    }
                };
                self.write_scalar(dest, old, ty)?;
                let op = match intrinsic_name.split('_').nth(1).unwrap() {
                    "or" => mir::BinOp::BitOr,
                    "xor" => mir::BinOp::BitXor,
                    "and" => mir::BinOp::BitAnd,
                    "xadd" => mir::BinOp::Add,
                    "xsub" => mir::BinOp::Sub,
                    _ => bug!("unknown atomic operation: {}", intrinsic_name),
                };
                // FIXME: what do atomics do on overflow?
                let (val, _) = self.binary_op(op, old.unwrap_or_err()?, ty, change, ty)?;
                self.write_scalar(Place::from_scalar_ptr(ptr.into(), dest_layout.align), val, ty)?;
            }

203 "breakpoint" => unimplemented!(), // halt miri
206 "copy_nonoverlapping" => {
207 let elem_ty = substs.type_at(0);
208 let elem_layout = self.layout_of(elem_ty)?;
209 let elem_size = elem_layout.size.bytes();
210 let count = self.value_to_usize(args[2])?;
211 if count * elem_size != 0 {
212 // TODO: We do not even validate alignment for the 0-bytes case. libstd relies on this in vec::IntoIter::next.
213 // Also see the write_bytes intrinsic.
214 let elem_align = elem_layout.align;
215 let src = self.into_ptr(args[0].value)?.unwrap_or_err()?;
216 let dest = self.into_ptr(args[1].value)?.unwrap_or_err()?;
222 Size::from_bytes(count * elem_size),
223 intrinsic_name.ends_with("_nonoverlapping"),
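            // Bit-twiddling intrinsics, evaluated by `numeric_intrinsic` at the
            // bottom of this file.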
228 "ctpop" | "cttz" | "cttz_nonzero" | "ctlz" | "ctlz_nonzero" | "bswap" => {
229 let ty = substs.type_at(0);
230 let num = self.value_to_scalar(args[0])?.to_bytes()?;
231 let kind = match self.layout_of(ty)?.abi {
232 ty::layout::Abi::Scalar(ref scalar) => scalar.value,
233 _ => Err(::rustc::mir::interpret::EvalErrorKind::TypeNotPrimitive(ty))?,
235 let num = if intrinsic_name.ends_with("_nonzero") {
237 return err!(Intrinsic(format!("{} called on 0", intrinsic_name)));
239 numeric_intrinsic(intrinsic_name.trim_right_matches("_nonzero"), num, kind)?
241 numeric_intrinsic(intrinsic_name, num, kind)?
243 self.write_scalar(dest, num, ty)?;
246 "discriminant_value" => {
247 let ty = substs.type_at(0);
248 let layout = self.layout_of(ty)?;
249 let adt_ptr = self.into_ptr(args[0].value)?;
250 let adt_align = self.layout_of(args[0].ty)?.align;
251 let place = Place::from_scalar_ptr(adt_ptr, adt_align);
252 let discr_val = self.read_discriminant_value(place, layout)?;
253 self.write_scalar(dest, Scalar::from_uint(discr_val, dest_layout.size), dest_layout.ty)?;
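            // Unary float intrinsics (here and in the f64 block below) map
            // directly onto the host's `f32`/`f64` methods.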
256 "sinf32" | "fabsf32" | "cosf32" | "sqrtf32" | "expf32" | "exp2f32" | "logf32" |
257 "log10f32" | "log2f32" | "floorf32" | "ceilf32" | "truncf32" => {
258 let f = self.value_to_scalar(args[0])?.to_bytes()?;
259 let f = f32::from_bits(f as u32);
260 let f = match intrinsic_name {
262 "fabsf32" => f.abs(),
264 "sqrtf32" => f.sqrt(),
266 "exp2f32" => f.exp2(),
268 "log10f32" => f.log10(),
269 "log2f32" => f.log2(),
270 "floorf32" => f.floor(),
271 "ceilf32" => f.ceil(),
272 "truncf32" => f.trunc(),
275 self.write_scalar(dest, Scalar::from_f32(f), dest_layout.ty)?;
278 "sinf64" | "fabsf64" | "cosf64" | "sqrtf64" | "expf64" | "exp2f64" | "logf64" |
279 "log10f64" | "log2f64" | "floorf64" | "ceilf64" | "truncf64" => {
280 let f = self.value_to_scalar(args[0])?.to_bytes()?;
281 let f = f64::from_bits(f as u64);
282 let f = match intrinsic_name {
284 "fabsf64" => f.abs(),
286 "sqrtf64" => f.sqrt(),
288 "exp2f64" => f.exp2(),
290 "log10f64" => f.log10(),
291 "log2f64" => f.log2(),
292 "floorf64" => f.floor(),
293 "ceilf64" => f.ceil(),
294 "truncf64" => f.trunc(),
297 self.write_scalar(dest, Scalar::from_f64(f), dest_layout.ty)?;
300 "fadd_fast" | "fsub_fast" | "fmul_fast" | "fdiv_fast" | "frem_fast" => {
301 let ty = substs.type_at(0);
302 let a = self.value_to_scalar(args[0])?;
303 let b = self.value_to_scalar(args[1])?;
304 let op = match intrinsic_name {
305 "fadd_fast" => mir::BinOp::Add,
306 "fsub_fast" => mir::BinOp::Sub,
307 "fmul_fast" => mir::BinOp::Mul,
308 "fdiv_fast" => mir::BinOp::Div,
309 "frem_fast" => mir::BinOp::Rem,
312 let result = self.binary_op(op, a, ty, b, ty)?;
313 self.write_scalar(dest, result.0, dest_layout.ty)?;
            "exact_div" => {
                // Performs an exact division, resulting in undefined behavior when
                // `x % y != 0` or `y == 0` or `x == T::min_value() && y == -1`.
                let ty = substs.type_at(0);
                let a = self.value_to_scalar(args[0])?;
                let b = self.value_to_scalar(args[1])?;
                // check x % y != 0
                if !self.binary_op(mir::BinOp::Rem, a, ty, b, ty)?.0.is_null() {
                    return err!(ValidationFailure(format!("exact_div: {:?} cannot be divided by {:?}", a, b)));
                }
                let result = self.binary_op(mir::BinOp::Div, a, ty, b, ty)?;
                self.write_scalar(dest, result.0, dest_layout.ty)?;
            }

330 "likely" | "unlikely" | "forget" => {}
            "init" => {
                // we don't want to force an allocation in case the destination is a simple value
                match dest {
                    Place::Local { frame, local } => {
                        match self.stack()[frame].locals[local].access()? {
                            Value::ByRef(ptr, _) => {
                                // These writes have no alignment restriction anyway.
                                self.memory.write_repeat(ptr, 0, dest_layout.size)?;
                            }
                            Value::Scalar(_) => self.write_value(ValTy { value: Value::Scalar(Scalar::null(dest_layout.size).into()), ty: dest_layout.ty }, dest)?,
                            Value::ScalarPair(..) => {
                                self.write_value(ValTy { value: Value::ScalarPair(Scalar::null(dest_layout.size).into(), Scalar::null(dest_layout.size).into()), ty: dest_layout.ty }, dest)?;
                            }
                        }
                    }
                    Place::Ptr {
                        ptr,
                        align: _,
                        extra: PlaceExtra::None,
                    } => self.memory.write_repeat(ptr.unwrap_or_err()?, 0, dest_layout.size)?,
                    Place::Ptr { .. } => {
                        bug!("init intrinsic tried to write to fat or unaligned ptr target")
                    }
                }
            }

            "min_align_of" => {
                let elem_ty = substs.type_at(0);
                let elem_align = self.layout_of(elem_ty)?.align.abi();
                let ptr_size = self.memory.pointer_size();
                let align_val = Scalar::from_uint(elem_align as u128, ptr_size);
                self.write_scalar(dest, align_val, dest_layout.ty)?;
            }

            "pref_align_of" => {
                let ty = substs.type_at(0);
                let layout = self.layout_of(ty)?;
                let align = layout.align.pref();
                let ptr_size = self.memory.pointer_size();
                let align_val = Scalar::from_uint(align as u128, ptr_size);
                self.write_scalar(dest, align_val, dest_layout.ty)?;
            }

            "move_val_init" => {
                let ty = substs.type_at(0);
                let ptr = self.into_ptr(args[0].value)?.unwrap_or_err()?;
                let align = self.layout_of(args[0].ty)?.align;
                self.write_value_to_ptr(args[1].value, ptr, align, ty)?;
            }

            "needs_drop" => {
                let ty = substs.type_at(0);
                let env = ty::ParamEnv::reveal_all();
                let needs_drop = ty.needs_drop(self.tcx.tcx, env);
                self.write_scalar(
                    dest,
                    Scalar::from_bool(needs_drop),
                    dest_layout.ty,
                )?;
            }

            "offset" => {
                let offset = self.value_to_isize(args[1])?;
                let ptr = self.into_ptr(args[0].value)?.unwrap_or_err()?;
                let result_ptr = self.pointer_offset(ptr, substs.type_at(0), offset)?;
                self.write_ptr(dest, result_ptr, dest_layout.ty)?;
            }

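            // `overflowing_*` wrap on overflow; `intrinsic_overflowing` writes
            // the wrapped result and discards the overflow flag.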
400 "overflowing_sub" => {
401 self.intrinsic_overflowing(
410 "overflowing_mul" => {
411 self.intrinsic_overflowing(
420 "overflowing_add" => {
421 self.intrinsic_overflowing(
            "powf32" => {
                let f = self.value_to_scalar(args[0])?.to_bits(Size::from_bits(32))?;
                let f = f32::from_bits(f as u32);
                let f2 = self.value_to_scalar(args[1])?.to_bits(Size::from_bits(32))?;
                let f2 = f32::from_bits(f2 as u32);
                self.write_scalar(
                    dest,
                    Scalar::from_f32(f.powf(f2)),
                    dest_layout.ty,
                )?;
            }

            "powf64" => {
                let f = self.value_to_scalar(args[0])?.to_bits(Size::from_bits(64))?;
                let f = f64::from_bits(f as u64);
                let f2 = self.value_to_scalar(args[1])?.to_bits(Size::from_bits(64))?;
                let f2 = f64::from_bits(f2 as u64);
                self.write_scalar(
                    dest,
                    Scalar::from_f64(f.powf(f2)),
                    dest_layout.ty,
                )?;
            }

            "fmaf32" => {
                let a = self.value_to_scalar(args[0])?.to_bits(Size::from_bits(32))?;
                let a = f32::from_bits(a as u32);
                let b = self.value_to_scalar(args[1])?.to_bits(Size::from_bits(32))?;
                let b = f32::from_bits(b as u32);
                let c = self.value_to_scalar(args[2])?.to_bits(Size::from_bits(32))?;
                let c = f32::from_bits(c as u32);
                self.write_scalar(
                    dest,
                    Scalar::from_f32(a * b + c),
                    dest_layout.ty,
                )?;
            }

            "fmaf64" => {
                let a = self.value_to_scalar(args[0])?.to_bits(Size::from_bits(64))?;
                let a = f64::from_bits(a as u64);
                let b = self.value_to_scalar(args[1])?.to_bits(Size::from_bits(64))?;
                let b = f64::from_bits(b as u64);
                let c = self.value_to_scalar(args[2])?.to_bits(Size::from_bits(64))?;
                let c = f64::from_bits(c as u64);
                self.write_scalar(
                    dest,
                    Scalar::from_f64(a * b + c),
                    dest_layout.ty,
                )?;
            }

            "powif32" => {
                let f = self.value_to_scalar(args[0])?.to_bits(Size::from_bits(32))?;
                let f = f32::from_bits(f as u32);
                let i = self.value_to_i32(args[1])?;
                self.write_scalar(
                    dest,
                    Scalar::from_f32(f.powi(i)),
                    dest_layout.ty,
                )?;
            }

            "powif64" => {
                let f = self.value_to_scalar(args[0])?.to_bits(Size::from_bits(64))?;
                let f = f64::from_bits(f as u64);
                let i = self.value_to_i32(args[1])?;
                self.write_scalar(
                    dest,
                    Scalar::from_f64(f.powi(i)),
                    dest_layout.ty,
                )?;
            }

            "size_of" => {
                let ty = substs.type_at(0);
                let size = self.layout_of(ty)?.size.bytes();
                let ptr_size = self.memory.pointer_size();
                self.write_scalar(dest, Scalar::from_uint(size, ptr_size), dest_layout.ty)?;
            }

            "size_of_val" => {
                let ty = substs.type_at(0);
                let (size, _) = self.size_and_align_of_dst(ty, args[0].value)?;
                let ptr_size = self.memory.pointer_size();
                self.write_scalar(
                    dest,
                    Scalar::from_uint(size.bytes() as u128, ptr_size),
                    dest_layout.ty,
                )?;
            }

            "min_align_of_val" |
            "align_of_val" => {
                let ty = substs.type_at(0);
                let (_, align) = self.size_and_align_of_dst(ty, args[0].value)?;
                let ptr_size = self.memory.pointer_size();
                self.write_scalar(
                    dest,
                    Scalar::from_uint(align.abi(), ptr_size),
                    dest_layout.ty,
                )?;
            }

            "type_name" => {
                let ty = substs.type_at(0);
                let ty_name = ty.to_string();
                let value = self.str_to_value(&ty_name)?;
                self.write_value(ValTy { value, ty: dest_layout.ty }, dest)?;
            }

            "type_id" => {
                let ty = substs.type_at(0);
                let n = self.tcx.type_id_hash(ty);
                self.write_scalar(dest, Scalar::Bits { bits: n as u128, size: 8 }, dest_layout.ty)?;
            }

            "transmute" => {
                let src_ty = substs.type_at(0);
                let _src_align = self.layout_of(src_ty)?.align;
                let ptr = self.force_allocation(dest)?.to_ptr()?;
                let dest_align = self.layout_of(substs.type_at(1))?.align;
                // Propagate the error instead of panicking on a failed write.
                self.write_value_to_ptr(args[0].value, ptr.into(), dest_align, src_ty)?;
            }

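            // The `unchecked_*` intrinsics are UB on an out-of-range shift or a
            // division by zero, so we report an error instead of continuing.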
            "unchecked_shl" => {
                let bits = dest_layout.size.bytes() as u128 * 8;
                let rhs = self.value_to_scalar(args[1])?
                    .to_bytes()?;
                if rhs >= bits {
                    return err!(Intrinsic(
                        format!("Overflowing shift by {} in unchecked_shl", rhs),
                    ));
                }
                self.intrinsic_overflowing(
                    mir::BinOp::Shl,
                    args[0],
                    args[1],
                    dest,
                    dest_layout.ty,
                )?;
            }

            "unchecked_shr" => {
                let bits = dest_layout.size.bytes() as u128 * 8;
                let rhs = self.value_to_scalar(args[1])?
                    .to_bytes()?;
                if rhs >= bits {
                    return err!(Intrinsic(
                        format!("Overflowing shift by {} in unchecked_shr", rhs),
                    ));
                }
                self.intrinsic_overflowing(
                    mir::BinOp::Shr,
                    args[0],
                    args[1],
                    dest,
                    dest_layout.ty,
                )?;
            }

            "unchecked_div" => {
                let rhs = self.value_to_scalar(args[1])?
                    .to_bytes()?;
                if rhs == 0 {
                    return err!(Intrinsic(format!("Division by 0 in unchecked_div")));
                }
                self.intrinsic_overflowing(
                    mir::BinOp::Div,
                    args[0],
                    args[1],
                    dest,
                    dest_layout.ty,
                )?;
            }

            "unchecked_rem" => {
                let rhs = self.value_to_scalar(args[1])?
                    .to_bytes()?;
                if rhs == 0 {
                    return err!(Intrinsic(format!("Division by 0 in unchecked_rem")));
                }
                self.intrinsic_overflowing(
                    mir::BinOp::Rem,
                    args[0],
                    args[1],
                    dest,
                    dest_layout.ty,
                )?;
            }

            "uninit" => {
                // we don't want to force an allocation in case the destination is a simple value
                match dest {
                    Place::Local { frame, local } => {
                        match self.stack()[frame].locals[local].access()? {
                            Value::ByRef(ptr, _) => {
                                // These writes have no alignment restriction anyway.
                                self.memory.mark_definedness(ptr, dest_layout.size, false)?;
                            }
                            Value::Scalar(_) => self.write_value(ValTy { value: Value::Scalar(ScalarMaybeUndef::Undef), ty: dest_layout.ty }, dest)?,
                            Value::ScalarPair(..) => {
                                self.write_value(ValTy { value: Value::ScalarPair(ScalarMaybeUndef::Undef, ScalarMaybeUndef::Undef), ty: dest_layout.ty }, dest)?;
                            }
                        }
                    }
                    Place::Ptr {
                        ptr,
                        align: _,
                        extra: PlaceExtra::None,
                    } => self.memory.mark_definedness(ptr.unwrap_or_err()?, dest_layout.size, false)?,
                    Place::Ptr { .. } => {
                        bug!("uninit intrinsic tried to write to fat or unaligned ptr target")
                    }
                }
            }

            "write_bytes" => {
                let ty = substs.type_at(0);
                let ty_layout = self.layout_of(ty)?;
                let val_byte = self.value_to_u8(args[1])?;
                let ptr = self.into_ptr(args[0].value)?.unwrap_or_err()?;
                let count = self.value_to_usize(args[2])?;
                if count > 0 {
                    // HashMap relies on write_bytes on a NULL ptr with count == 0 to work
                    // TODO: Should we, at least, validate the alignment? (Also see the copy intrinsic)
                    self.memory.check_align(ptr, ty_layout.align)?;
                    self.memory.write_repeat(ptr, val_byte, ty_layout.size * count)?;
                }
            }

            name => return err!(Unimplemented(format!("unimplemented intrinsic: {}", name))),
        }

        self.goto_block(target);

        // Since we pushed no stack frame, the main loop will act
        // as if the call just completed and it's returning to the
        // current frame.
        Ok(())
    }
}
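
/// Evaluates a bit-twiddling intrinsic (`bswap`, `ctlz`, `ctpop`, `cttz`) on
/// the raw `bytes` of an integer of the given primitive `kind`.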
fn numeric_intrinsic<'tcx>(
    name: &str,
    bytes: u128,
    kind: Primitive,
) -> EvalResult<'tcx, Scalar> {
    macro_rules! integer_intrinsic {
        ($method:ident) => ({
            let (result_bytes, size) = match kind {
                Primitive::Int(I8, true) => ((bytes as i8).$method() as u128, 1),
                Primitive::Int(I8, false) => ((bytes as u8).$method() as u128, 1),
                Primitive::Int(I16, true) => ((bytes as i16).$method() as u128, 2),
                Primitive::Int(I16, false) => ((bytes as u16).$method() as u128, 2),
                Primitive::Int(I32, true) => ((bytes as i32).$method() as u128, 4),
                Primitive::Int(I32, false) => ((bytes as u32).$method() as u128, 4),
                Primitive::Int(I64, true) => ((bytes as i64).$method() as u128, 8),
                Primitive::Int(I64, false) => ((bytes as u64).$method() as u128, 8),
                Primitive::Int(I128, true) => ((bytes as i128).$method() as u128, 16),
                Primitive::Int(I128, false) => (bytes.$method() as u128, 16),
                _ => bug!("invalid `{}` argument: {:?}", name, bytes),
            };

            Scalar::from_uint(result_bytes, Size::from_bytes(size))
        })
    }

    let result_val = match name {
        "bswap" => integer_intrinsic!(swap_bytes),
        "ctlz" => integer_intrinsic!(leading_zeros),
        "ctpop" => integer_intrinsic!(count_ones),
        "cttz" => integer_intrinsic!(trailing_zeros),
        _ => bug!("not a numeric intrinsic: {}", name),
    };

    Ok(result_val)
}