fn call_c_abi(
&mut self,
def_id: DefId,
- arg_operands: &[mir::Operand<'tcx>],
+ args: &[ValTy<'tcx>],
dest: Lvalue,
dest_ty: Ty<'tcx>,
dest_block: mir::BasicBlock,
&mut self,
instance: ty::Instance<'tcx>,
destination: Option<(Lvalue, mir::BasicBlock)>,
- arg_operands: &[mir::Operand<'tcx>],
+ args: &[ValTy<'tcx>],
sig: ty::FnSig<'tcx>,
path: String,
) -> EvalResult<'tcx>;
&mut self,
instance: ty::Instance<'tcx>,
destination: Option<(Lvalue, mir::BasicBlock)>,
- arg_operands: &[mir::Operand<'tcx>],
+ args: &[ValTy<'tcx>],
span: Span,
sig: ty::FnSig<'tcx>,
) -> EvalResult<'tcx, bool>;
&mut self,
instance: ty::Instance<'tcx>,
destination: Option<(Lvalue, mir::BasicBlock)>,
- arg_operands: &[mir::Operand<'tcx>],
+ args: &[ValTy<'tcx>],
span: Span,
sig: ty::FnSig<'tcx>,
) -> EvalResult<'tcx, bool> {
self.call_missing_fn(
instance,
destination,
- arg_operands,
+ args,
sig,
path,
)?;
fn call_c_abi(
&mut self,
def_id: DefId,
- arg_operands: &[mir::Operand<'tcx>],
+ args: &[ValTy<'tcx>],
dest: Lvalue,
dest_ty: Ty<'tcx>,
dest_block: mir::BasicBlock,
.unwrap_or(name)
.as_str();
- let args_res: EvalResult<Vec<Value>> = arg_operands
- .iter()
- .map(|arg| self.eval_operand(arg))
- .collect();
- let args = args_res?;
-
- let usize = self.tcx.types.usize;
-
match &link_name[..] {
"malloc" => {
- let size = self.value_to_primval(args[0], usize)?.to_u64()?;
+ let size = self.value_to_primval(args[0])?.to_u64()?;
if size == 0 {
self.write_null(dest, dest_ty)?;
} else {
//
// libc::syscall(NR_GETRANDOM, buf.as_mut_ptr(), buf.len(), GRND_NONBLOCK)
// is called if a `HashMap` is created the regular way.
- match self.value_to_primval(args[0], usize)?.to_u64()? {
+ match self.value_to_primval(args[0])?.to_u64()? {
318 | 511 => {
return err!(Unimplemented(
"miri does not support random number generators".to_owned(),
"memcmp" => {
let left = args[0].into_ptr(&mut self.memory)?;
let right = args[1].into_ptr(&mut self.memory)?;
- let n = self.value_to_primval(args[2], usize)?.to_u64()?;
+ let n = self.value_to_primval(args[2])?.to_u64()?;
let result = {
let left_bytes = self.memory.read_bytes(left, n)?;
"memrchr" => {
let ptr = args[0].into_ptr(&mut self.memory)?;
- let val = self.value_to_primval(args[1], usize)?.to_u64()? as u8;
- let num = self.value_to_primval(args[2], usize)?.to_u64()?;
+ let val = self.value_to_primval(args[1])?.to_u64()? as u8;
+ let num = self.value_to_primval(args[2])?.to_u64()?;
if let Some(idx) = self.memory.read_bytes(ptr, num)?.iter().rev().position(
|&c| c == val,
)
"memchr" => {
let ptr = args[0].into_ptr(&mut self.memory)?;
- let val = self.value_to_primval(args[1], usize)?.to_u64()? as u8;
- let num = self.value_to_primval(args[2], usize)?.to_u64()?;
+ let val = self.value_to_primval(args[1])?.to_u64()? as u8;
+ let num = self.value_to_primval(args[2])?.to_u64()?;
if let Some(idx) = self.memory.read_bytes(ptr, num)?.iter().position(
|&c| c == val,
)
}
"write" => {
- let fd = self.value_to_primval(args[0], usize)?.to_u64()?;
+ let fd = self.value_to_primval(args[0])?.to_u64()?;
let buf = args[1].into_ptr(&mut self.memory)?;
- let n = self.value_to_primval(args[2], usize)?.to_u64()?;
+ let n = self.value_to_primval(args[2])?.to_u64()?;
trace!("Called write({:?}, {:?}, {:?})", fd, buf, n);
let result = if fd == 1 || fd == 2 {
// stdout/stderr
}
"sysconf" => {
- let c_int = self.operand_ty(&arg_operands[0]);
- let name = self.value_to_primval(args[0], c_int)?.to_u64()?;
+ let name = self.value_to_primval(args[0])?.to_u64()?;
trace!("sysconf() called with name {}", name);
// cache the sysconf integers via miri's global cache
let paths = &[
};
// compute global if not cached
let val = match self.globals.get(&cid).cloned() {
- Some(ptr) => self.value_to_primval(Value::ByRef(ptr), c_int)?.to_u64()?,
+ Some(ptr) => self.value_to_primval(ValTy { value: Value::ByRef(ptr), ty: args[0].ty })?.to_u64()?,
None => eval_body_as_primval(self.tcx, instance)?.0.to_u64()?,
};
if val == name {
};
// Figure out how large a pthread TLS key actually is. This is libc::pthread_key_t.
- let key_type = self.operand_ty(&arg_operands[0]).builtin_deref(true, ty::LvaluePreference::NoPreference)
+ let key_type = args[0].ty.builtin_deref(true, ty::LvaluePreference::NoPreference)
.ok_or(EvalErrorKind::AbiViolation("Wrong signature used for pthread_key_create: First argument must be a raw pointer.".to_owned()))?.ty;
let key_size = {
let layout = self.type_layout(key_type)?;
}
"pthread_key_delete" => {
// The conversion into TlsKey here is a little fishy, but should work as long as usize >= libc::pthread_key_t
- let key = self.value_to_primval(args[0], usize)?.to_u64()? as TlsKey;
+ let key = self.value_to_primval(args[0])?.to_u64()? as TlsKey;
self.memory.delete_tls_key(key)?;
// Return success (0)
self.write_null(dest, dest_ty)?;
}
"pthread_getspecific" => {
// The conversion into TlsKey here is a little fishy, but should work as long as usize >= libc::pthread_key_t
- let key = self.value_to_primval(args[0], usize)?.to_u64()? as TlsKey;
+ let key = self.value_to_primval(args[0])?.to_u64()? as TlsKey;
let ptr = self.memory.load_tls(key)?;
self.write_ptr(dest, ptr, dest_ty)?;
}
"pthread_setspecific" => {
// The conversion into TlsKey here is a little fishy, but should work as long as usize >= libc::pthread_key_t
- let key = self.value_to_primval(args[0], usize)?.to_u64()? as TlsKey;
+ let key = self.value_to_primval(args[0])?.to_u64()? as TlsKey;
let new_ptr = args[1].into_ptr(&mut self.memory)?;
self.memory.store_tls(key, new_ptr)?;
&mut self,
instance: ty::Instance<'tcx>,
destination: Option<(Lvalue, mir::BasicBlock)>,
- arg_operands: &[mir::Operand<'tcx>],
+ args: &[ValTy<'tcx>],
sig: ty::FnSig<'tcx>,
path: String,
) -> EvalResult<'tcx> {
// unify these two mechanisms for "hooking into missing functions".
self.call_c_abi(
instance.def_id(),
- arg_operands,
+ args,
dest,
dest_ty,
dest_block,
return Ok(());
}
- let args_res: EvalResult<Vec<Value>> = arg_operands
- .iter()
- .map(|arg| self.eval_operand(arg))
- .collect();
- let args = args_res?;
-
- let usize = self.tcx.types.usize;
-
match &path[..] {
// Allocators are magic. They have no MIR, even when the rest of libstd does.
"alloc::heap::::__rust_alloc" => {
- let size = self.value_to_primval(args[0], usize)?.to_u64()?;
- let align = self.value_to_primval(args[1], usize)?.to_u64()?;
+ let size = self.value_to_primval(args[0])?.to_u64()?;
+ let align = self.value_to_primval(args[1])?.to_u64()?;
if size == 0 {
return err!(HeapAllocZeroBytes);
}
self.write_primval(dest, PrimVal::Ptr(ptr), dest_ty)?;
}
"alloc::heap::::__rust_alloc_zeroed" => {
- let size = self.value_to_primval(args[0], usize)?.to_u64()?;
- let align = self.value_to_primval(args[1], usize)?.to_u64()?;
+ let size = self.value_to_primval(args[0])?.to_u64()?;
+ let align = self.value_to_primval(args[1])?.to_u64()?;
if size == 0 {
return err!(HeapAllocZeroBytes);
}
}
"alloc::heap::::__rust_dealloc" => {
let ptr = args[0].into_ptr(&mut self.memory)?.to_ptr()?;
- let old_size = self.value_to_primval(args[1], usize)?.to_u64()?;
- let align = self.value_to_primval(args[2], usize)?.to_u64()?;
+ let old_size = self.value_to_primval(args[1])?.to_u64()?;
+ let align = self.value_to_primval(args[2])?.to_u64()?;
if old_size == 0 {
return err!(HeapAllocZeroBytes);
}
}
"alloc::heap::::__rust_realloc" => {
let ptr = args[0].into_ptr(&mut self.memory)?.to_ptr()?;
- let old_size = self.value_to_primval(args[1], usize)?.to_u64()?;
- let old_align = self.value_to_primval(args[2], usize)?.to_u64()?;
- let new_size = self.value_to_primval(args[3], usize)?.to_u64()?;
- let new_align = self.value_to_primval(args[4], usize)?.to_u64()?;
+ let old_size = self.value_to_primval(args[1])?.to_u64()?;
+ let old_align = self.value_to_primval(args[2])?.to_u64()?;
+ let new_size = self.value_to_primval(args[3])?.to_u64()?;
+ let new_align = self.value_to_primval(args[4])?.to_u64()?;
if old_size == 0 || new_size == 0 {
return err!(HeapAllocZeroBytes);
}
use rustc::ty::{self, Ty};
use rustc_miri::interpret::{EvalResult, Lvalue, LvalueExtra, PrimVal, PrimValKind, Value, Pointer,
- HasMemory, EvalContext, PtrAndAlign};
+ HasMemory, EvalContext, PtrAndAlign, ValTy};
use helpers::EvalContextExt as HelperEvalContextExt;
fn call_intrinsic(
&mut self,
instance: ty::Instance<'tcx>,
- args: &[mir::Operand<'tcx>],
+ args: &[ValTy<'tcx>],
dest: Lvalue,
dest_ty: Ty<'tcx>,
dest_layout: &'tcx Layout,
fn call_intrinsic(
&mut self,
instance: ty::Instance<'tcx>,
- args: &[mir::Operand<'tcx>],
+ args: &[ValTy<'tcx>],
dest: Lvalue,
dest_ty: Ty<'tcx>,
dest_layout: &'tcx Layout,
target: mir::BasicBlock,
) -> EvalResult<'tcx> {
- let arg_vals: EvalResult<Vec<Value>> =
- args.iter().map(|arg| self.eval_operand(arg)).collect();
- let arg_vals = arg_vals?;
- let i32 = self.tcx.types.i32;
- let isize = self.tcx.types.isize;
- let usize = self.tcx.types.usize;
- let f32 = self.tcx.types.f32;
- let f64 = self.tcx.types.f64;
let substs = instance.substs;
let intrinsic_name = &self.tcx.item_name(instance.def_id()).as_str()[..];
"add_with_overflow" => {
self.intrinsic_with_overflow(
mir::BinOp::Add,
- &args[0],
- &args[1],
+ args[0],
+ args[1],
dest,
dest_ty,
)?
"sub_with_overflow" => {
self.intrinsic_with_overflow(
mir::BinOp::Sub,
- &args[0],
- &args[1],
+ args[0],
+ args[1],
dest,
dest_ty,
)?
"mul_with_overflow" => {
self.intrinsic_with_overflow(
mir::BinOp::Mul,
- &args[0],
- &args[1],
+ args[0],
+ args[1],
dest,
dest_ty,
)?
}
"arith_offset" => {
- let offset = self.value_to_primval(arg_vals[1], isize)?.to_i128()? as i64;
- let ptr = arg_vals[0].into_ptr(&self.memory)?;
+ let offset = self.value_to_primval(args[1])?.to_i128()? as i64;
+ let ptr = args[0].into_ptr(&self.memory)?;
let result_ptr = self.wrapping_pointer_offset(ptr, substs.type_at(0), offset)?;
self.write_ptr(dest, result_ptr, dest_ty)?;
}
"assume" => {
- let bool = self.tcx.types.bool;
- let cond = self.value_to_primval(arg_vals[0], bool)?.to_bool()?;
+ let cond = self.value_to_primval(args[0])?.to_bool()?;
if !cond {
return err!(AssumptionNotHeld);
}
"atomic_load_relaxed" |
"atomic_load_acq" |
"volatile_load" => {
- let ty = substs.type_at(0);
- let ptr = arg_vals[0].into_ptr(&self.memory)?;
- self.write_value(Value::by_ref(ptr), dest, ty)?;
+ let ptr = args[0].into_ptr(&self.memory)?;
+ let valty = ValTy {
+ value: Value::by_ref(ptr),
+ ty: substs.type_at(0),
+ };
+ self.write_value(valty, dest)?;
}
"atomic_store" |
"atomic_store_rel" |
"volatile_store" => {
let ty = substs.type_at(0);
- let dest = arg_vals[0].into_ptr(&self.memory)?;
- self.write_value_to_ptr(arg_vals[1], dest, ty)?;
+ let dest = args[0].into_ptr(&self.memory)?;
+ self.write_value_to_ptr(args[1].value, dest, ty)?;
}
"atomic_fence_acq" => {
_ if intrinsic_name.starts_with("atomic_xchg") => {
let ty = substs.type_at(0);
- let ptr = arg_vals[0].into_ptr(&self.memory)?;
- let change = self.value_to_primval(arg_vals[1], ty)?;
+ let ptr = args[0].into_ptr(&self.memory)?;
+ let change = self.value_to_primval(args[1])?;
let old = self.read_value(ptr, ty)?;
let old = match old {
Value::ByVal(val) => val,
_ if intrinsic_name.starts_with("atomic_cxchg") => {
let ty = substs.type_at(0);
- let ptr = arg_vals[0].into_ptr(&self.memory)?;
- let expect_old = self.value_to_primval(arg_vals[1], ty)?;
- let change = self.value_to_primval(arg_vals[2], ty)?;
+ let ptr = args[0].into_ptr(&self.memory)?;
+ let expect_old = self.value_to_primval(args[1])?;
+ let change = self.value_to_primval(args[2])?;
let old = self.read_value(ptr, ty)?;
let old = match old {
Value::ByVal(val) => val,
"atomic_xsub_acqrel" |
"atomic_xsub_relaxed" => {
let ty = substs.type_at(0);
- let ptr = arg_vals[0].into_ptr(&self.memory)?;
- let change = self.value_to_primval(arg_vals[1], ty)?;
+ let ptr = args[0].into_ptr(&self.memory)?;
+ let change = self.value_to_primval(args[1])?;
let old = self.read_value(ptr, ty)?;
let old = match old {
Value::ByVal(val) => val,
"copy_nonoverlapping" => {
let elem_ty = substs.type_at(0);
let elem_size = self.type_size(elem_ty)?.expect("cannot copy unsized value");
- let count = self.value_to_primval(arg_vals[2], usize)?.to_u64()?;
+ let count = self.value_to_primval(args[2])?.to_u64()?;
if count * elem_size != 0 {
// TODO: We do not even validate alignment for the 0-bytes case. libstd relies on this in vec::IntoIter::next.
// Also see the write_bytes intrinsic.
let elem_align = self.type_align(elem_ty)?;
- let src = arg_vals[0].into_ptr(&self.memory)?;
- let dest = arg_vals[1].into_ptr(&self.memory)?;
+ let src = args[0].into_ptr(&self.memory)?;
+ let dest = args[1].into_ptr(&self.memory)?;
self.memory.copy(
src,
dest,
"ctpop" | "cttz" | "cttz_nonzero" | "ctlz" | "ctlz_nonzero" | "bswap" => {
let ty = substs.type_at(0);
- let num = self.value_to_primval(arg_vals[0], ty)?.to_bytes()?;
+ let num = self.value_to_primval(args[0])?.to_bytes()?;
let kind = self.ty_to_primval_kind(ty)?;
let num = if intrinsic_name.ends_with("_nonzero") {
if num == 0 {
"discriminant_value" => {
let ty = substs.type_at(0);
- let adt_ptr = arg_vals[0].into_ptr(&self.memory)?.to_ptr()?;
+ let adt_ptr = args[0].into_ptr(&self.memory)?.to_ptr()?;
let discr_val = self.read_discriminant_value(adt_ptr, ty)?;
self.write_primval(dest, PrimVal::Bytes(discr_val), dest_ty)?;
}
"sinf32" | "fabsf32" | "cosf32" | "sqrtf32" | "expf32" | "exp2f32" | "logf32" |
"log10f32" | "log2f32" | "floorf32" | "ceilf32" | "truncf32" => {
- let f = self.value_to_primval(arg_vals[0], f32)?.to_f32()?;
+ let f = self.value_to_primval(args[0])?.to_f32()?;
let f = match intrinsic_name {
"sinf32" => f.sin(),
"fabsf32" => f.abs(),
"sinf64" | "fabsf64" | "cosf64" | "sqrtf64" | "expf64" | "exp2f64" | "logf64" |
"log10f64" | "log2f64" | "floorf64" | "ceilf64" | "truncf64" => {
- let f = self.value_to_primval(arg_vals[0], f64)?.to_f64()?;
+ let f = self.value_to_primval(args[0])?.to_f64()?;
let f = match intrinsic_name {
"sinf64" => f.sin(),
"fabsf64" => f.abs(),
"fadd_fast" | "fsub_fast" | "fmul_fast" | "fdiv_fast" | "frem_fast" => {
let ty = substs.type_at(0);
- let a = self.value_to_primval(arg_vals[0], ty)?;
- let b = self.value_to_primval(arg_vals[1], ty)?;
+ let a = self.value_to_primval(args[0])?;
+ let b = self.value_to_primval(args[1])?;
let op = match intrinsic_name {
"fadd_fast" => mir::BinOp::Add,
"fsub_fast" => mir::BinOp::Sub,
"move_val_init" => {
let ty = substs.type_at(0);
- let ptr = arg_vals[0].into_ptr(&self.memory)?;
- self.write_value_to_ptr(arg_vals[1], ptr, ty)?;
+ let ptr = args[0].into_ptr(&self.memory)?;
+ self.write_value_to_ptr(args[1].value, ptr, ty)?;
}
"needs_drop" => {
}
"offset" => {
- let offset = self.value_to_primval(arg_vals[1], isize)?.to_i128()? as i64;
- let ptr = arg_vals[0].into_ptr(&self.memory)?;
+ let offset = self.value_to_primval(args[1])?.to_i128()? as i64;
+ let ptr = args[0].into_ptr(&self.memory)?;
let result_ptr = self.pointer_offset(ptr, substs.type_at(0), offset)?;
self.write_ptr(dest, result_ptr, dest_ty)?;
}
"overflowing_sub" => {
self.intrinsic_overflowing(
mir::BinOp::Sub,
- &args[0],
- &args[1],
+ args[0],
+ args[1],
dest,
dest_ty,
)?;
"overflowing_mul" => {
self.intrinsic_overflowing(
mir::BinOp::Mul,
- &args[0],
- &args[1],
+ args[0],
+ args[1],
dest,
dest_ty,
)?;
"overflowing_add" => {
self.intrinsic_overflowing(
mir::BinOp::Add,
- &args[0],
- &args[1],
+ args[0],
+ args[1],
dest,
dest_ty,
)?;
}
"powf32" => {
- let f = self.value_to_primval(arg_vals[0], f32)?.to_f32()?;
- let f2 = self.value_to_primval(arg_vals[1], f32)?.to_f32()?;
+ let f = self.value_to_primval(args[0])?.to_f32()?;
+ let f2 = self.value_to_primval(args[1])?.to_f32()?;
self.write_primval(
dest,
PrimVal::from_f32(f.powf(f2)),
}
"powf64" => {
- let f = self.value_to_primval(arg_vals[0], f64)?.to_f64()?;
- let f2 = self.value_to_primval(arg_vals[1], f64)?.to_f64()?;
+ let f = self.value_to_primval(args[0])?.to_f64()?;
+ let f2 = self.value_to_primval(args[1])?.to_f64()?;
self.write_primval(
dest,
PrimVal::from_f64(f.powf(f2)),
}
"fmaf32" => {
- let a = self.value_to_primval(arg_vals[0], f32)?.to_f32()?;
- let b = self.value_to_primval(arg_vals[1], f32)?.to_f32()?;
- let c = self.value_to_primval(arg_vals[2], f32)?.to_f32()?;
+ let a = self.value_to_primval(args[0])?.to_f32()?;
+ let b = self.value_to_primval(args[1])?.to_f32()?;
+ let c = self.value_to_primval(args[2])?.to_f32()?;
self.write_primval(
dest,
PrimVal::from_f32(a * b + c),
}
"fmaf64" => {
- let a = self.value_to_primval(arg_vals[0], f64)?.to_f64()?;
- let b = self.value_to_primval(arg_vals[1], f64)?.to_f64()?;
- let c = self.value_to_primval(arg_vals[2], f64)?.to_f64()?;
+ let a = self.value_to_primval(args[0])?.to_f64()?;
+ let b = self.value_to_primval(args[1])?.to_f64()?;
+ let c = self.value_to_primval(args[2])?.to_f64()?;
self.write_primval(
dest,
PrimVal::from_f64(a * b + c),
}
"powif32" => {
- let f = self.value_to_primval(arg_vals[0], f32)?.to_f32()?;
- let i = self.value_to_primval(arg_vals[1], i32)?.to_i128()?;
+ let f = self.value_to_primval(args[0])?.to_f32()?;
+ let i = self.value_to_primval(args[1])?.to_i128()?;
self.write_primval(
dest,
PrimVal::from_f32(f.powi(i as i32)),
}
"powif64" => {
- let f = self.value_to_primval(arg_vals[0], f64)?.to_f64()?;
- let i = self.value_to_primval(arg_vals[1], i32)?.to_i128()?;
+ let f = self.value_to_primval(args[0])?.to_f64()?;
+ let i = self.value_to_primval(args[1])?.to_i128()?;
self.write_primval(
dest,
PrimVal::from_f64(f.powi(i as i32)),
"size_of_val" => {
let ty = substs.type_at(0);
- let (size, _) = self.size_and_align_of_dst(ty, arg_vals[0])?;
+ let (size, _) = self.size_and_align_of_dst(ty, args[0].value)?;
self.write_primval(
dest,
PrimVal::from_u128(size as u128),
"min_align_of_val" |
"align_of_val" => {
let ty = substs.type_at(0);
- let (_, align) = self.size_and_align_of_dst(ty, arg_vals[0])?;
+ let (_, align) = self.size_and_align_of_dst(ty, args[0].value)?;
self.write_primval(
dest,
PrimVal::from_u128(align as u128),
"type_name" => {
let ty = substs.type_at(0);
let ty_name = ty.to_string();
- let s = self.str_to_value(&ty_name)?;
- self.write_value(s, dest, dest_ty)?;
+ let value = self.str_to_value(&ty_name)?;
+ self.write_value(ValTy { value, ty: dest_ty }, dest)?;
}
"type_id" => {
let ty = substs.type_at(0);
/*aligned*/
false,
|ectx| {
- ectx.write_value_to_ptr(arg_vals[0], ptr.into(), src_ty)
+ ectx.write_value_to_ptr(args[0].value, ptr.into(), src_ty)
},
)?;
}
let bits = self.type_size(dest_ty)?.expect(
"intrinsic can't be called on unsized type",
) as u128 * 8;
- let rhs = self.value_to_primval(arg_vals[1], substs.type_at(0))?
+ let rhs = self.value_to_primval(args[1])?
.to_bytes()?;
if rhs >= bits {
return err!(Intrinsic(
}
self.intrinsic_overflowing(
mir::BinOp::Shl,
- &args[0],
- &args[1],
+ args[0],
+ args[1],
dest,
dest_ty,
)?;
let bits = self.type_size(dest_ty)?.expect(
"intrinsic can't be called on unsized type",
) as u128 * 8;
- let rhs = self.value_to_primval(arg_vals[1], substs.type_at(0))?
+ let rhs = self.value_to_primval(args[1])?
.to_bytes()?;
if rhs >= bits {
return err!(Intrinsic(
}
self.intrinsic_overflowing(
mir::BinOp::Shr,
- &args[0],
- &args[1],
+ args[0],
+ args[1],
dest,
dest_ty,
)?;
}
"unchecked_div" => {
- let rhs = self.value_to_primval(arg_vals[1], substs.type_at(0))?
+ let rhs = self.value_to_primval(args[1])?
.to_bytes()?;
if rhs == 0 {
return err!(Intrinsic(format!("Division by 0 in unchecked_div")));
}
self.intrinsic_overflowing(
mir::BinOp::Div,
- &args[0],
- &args[1],
+ args[0],
+ args[1],
dest,
dest_ty,
)?;
}
"unchecked_rem" => {
- let rhs = self.value_to_primval(arg_vals[1], substs.type_at(0))?
+ let rhs = self.value_to_primval(args[1])?
.to_bytes()?;
if rhs == 0 {
return err!(Intrinsic(format!("Division by 0 in unchecked_rem")));
}
self.intrinsic_overflowing(
mir::BinOp::Rem,
- &args[0],
- &args[1],
+ args[0],
+ args[1],
dest,
dest_ty,
)?;
}
"write_bytes" => {
- let u8 = self.tcx.types.u8;
let ty = substs.type_at(0);
let ty_align = self.type_align(ty)?;
- let val_byte = self.value_to_primval(arg_vals[1], u8)?.to_u128()? as u8;
+ let val_byte = self.value_to_primval(args[1])?.to_u128()? as u8;
let size = self.type_size(ty)?.expect(
"write_bytes() type must be sized",
);
- let ptr = arg_vals[0].into_ptr(&self.memory)?;
- let count = self.value_to_primval(arg_vals[2], usize)?.to_u64()?;
+ let ptr = args[0].into_ptr(&self.memory)?;
+ let count = self.value_to_primval(args[2])?.to_u64()?;
if count > 0 {
// HashMap relies on write_bytes on a NULL ptr with count == 0 to work
// TODO: Should we, at least, validate the alignment? (Also see the copy intrinsic)
let main_ty = main_instance.def.def_ty(ecx.tcx);
let main_ptr_ty = ecx.tcx.mk_fn_ptr(main_ty.fn_sig(ecx.tcx));
ecx.write_value(
- Value::ByVal(PrimVal::Ptr(main_ptr)),
+ ValTy {
+ value: Value::ByVal(PrimVal::Ptr(main_ptr)),
+ ty: main_ptr_ty,
+ },
dest,
- main_ptr_ty,
)?;
// Second argument (argc): 0
ecx: &mut EvalContext<'a, 'tcx, Self>,
instance: ty::Instance<'tcx>,
destination: Option<(Lvalue, mir::BasicBlock)>,
- arg_operands: &[mir::Operand<'tcx>],
+ args: &[ValTy<'tcx>],
span: Span,
sig: ty::FnSig<'tcx>,
) -> EvalResult<'tcx, bool> {
- ecx.eval_fn_call(instance, destination, arg_operands, span, sig)
+ ecx.eval_fn_call(instance, destination, args, span, sig)
}
fn call_intrinsic<'a>(
ecx: &mut rustc_miri::interpret::EvalContext<'a, 'tcx, Self>,
instance: ty::Instance<'tcx>,
- args: &[mir::Operand<'tcx>],
+ args: &[ValTy<'tcx>],
dest: Lvalue,
dest_ty: ty::Ty<'tcx>,
dest_layout: &'tcx Layout,
use syntax::codemap::Span;
use super::{EvalResult, EvalError, EvalErrorKind, GlobalId, Lvalue, Value, PrimVal, EvalContext,
- StackPopCleanup, PtrAndAlign, MemoryKind};
+ StackPopCleanup, PtrAndAlign, MemoryKind, ValTy};
use rustc_const_math::ConstInt;
while ecx.step()? {}
}
let value = Value::ByRef(*ecx.globals.get(&cid).expect("global not cached"));
- Ok((ecx.value_to_primval(value, mir.return_ty)?, mir.return_ty))
+ let valty = ValTy {
+ value,
+ ty: mir.return_ty,
+ };
+ Ok((ecx.value_to_primval(valty)?, mir.return_ty))
}
pub fn eval_body_as_integer<'a, 'tcx>(
ecx: &mut EvalContext<'a, 'tcx, Self>,
instance: ty::Instance<'tcx>,
destination: Option<(Lvalue, mir::BasicBlock)>,
- _arg_operands: &[mir::Operand<'tcx>],
+ _args: &[ValTy<'tcx>],
span: Span,
_sig: ty::FnSig<'tcx>,
) -> EvalResult<'tcx, bool> {
fn call_intrinsic<'a>(
_ecx: &mut EvalContext<'a, 'tcx, Self>,
_instance: ty::Instance<'tcx>,
- _args: &[mir::Operand<'tcx>],
+ _args: &[ValTy<'tcx>],
_dest: Lvalue,
_dest_ty: Ty<'tcx>,
_dest_layout: &'tcx layout::Layout,
pub packed: bool,
}
+/// A `Value` together with the type it was evaluated at.
+///
+/// Bundling the type with the value lets helpers such as
+/// `value_to_primval` take a single `ValTy` argument instead of a
+/// separate `(Value, Ty)` pair, and lets intrinsic/ABI handlers read
+/// `args[i].ty` directly instead of re-querying `operand_ty`.
+#[derive(Copy, Clone, Debug)]
+pub struct ValTy<'tcx> {
+ pub value: Value,
+ pub ty: Ty<'tcx>,
+}
+
+/// Deref to the inner `Value` so call sites that only need the value
+/// (e.g. `args[0].into_ptr(&mut self.memory)`) keep working unchanged.
+impl<'tcx> ::std::ops::Deref for ValTy<'tcx> {
+ type Target = Value;
+ fn deref(&self) -> &Value {
+ &self.value
+ }
+}
+
#[derive(Copy, Clone, Debug)]
pub struct PtrAndAlign {
pub ptr: Pointer,
if self.ty_to_primval_kind(dest_ty).is_ok() {
assert_eq!(operands.len(), 1);
let value = self.eval_operand(&operands[0])?;
- let value_ty = self.operand_ty(&operands[0]);
- return self.write_value(value, dest, value_ty);
+ return self.write_value(value, dest);
}
for (field_index, operand) in operands.iter().enumerate() {
let value = self.eval_operand(operand)?;
- let value_ty = self.operand_ty(operand);
- let field_dest = self.lvalue_field(dest, field_index, dest_ty, value_ty)?;
- self.write_value(value, field_dest, value_ty)?;
+ let field_dest = self.lvalue_field(dest, field_index, dest_ty, value.ty)?;
+ self.write_value(value, field_dest)?;
}
Ok(())
}
use rustc::mir::Rvalue::*;
match *rvalue {
Use(ref operand) => {
- let value = self.eval_operand(operand)?;
- self.write_value(value, dest, dest_ty)?;
+ let value = self.eval_operand(operand)?.value;
+ let valty = ValTy {
+ value,
+ ty: dest_ty,
+ };
+ self.write_value(valty, dest)?;
}
BinaryOp(bin_op, ref left, ref right) => {
+ let left = self.eval_operand(left)?;
+ let right = self.eval_operand(right)?;
if self.intrinsic_overflowing(
bin_op,
left,
}
CheckedBinaryOp(bin_op, ref left, ref right) => {
+ let left = self.eval_operand(left)?;
+ let right = self.eval_operand(right)?;
self.intrinsic_with_overflow(
bin_op,
left,
assert_eq!(operands.len(), 1);
let operand = &operands[0];
let value = self.eval_operand(operand)?;
- let value_ty = self.operand_ty(operand);
- self.write_value(value, dest, value_ty)?;
+ self.write_value(value, dest)?;
} else {
if let Some(operand) = operands.get(0) {
assert_eq!(operands.len(), 1);
assert_eq!(operands.len(), 1);
let operand = &operands[0];
let value = self.eval_operand(operand)?;
- let value_ty = self.operand_ty(operand);
self.write_maybe_aligned_mut(!variants.packed, |ecx| {
- ecx.write_value(value, dest, value_ty)
+ ecx.write_value(value, dest)
})?;
}
let elem_size = self.type_size(elem_ty)?.expect(
"repeat element type must be sized",
);
- let value = self.eval_operand(operand)?;
+ let value = self.eval_operand(operand)?.value;
// FIXME(solson)
let dest = Pointer::from(self.force_allocation(dest)?.to_ptr()?);
bug!("attempted to take a reference to an enum downcast lvalue")
}
};
- self.write_value(val, dest, dest_ty)?;
+ let valty = ValTy {
+ value: val,
+ ty: dest_ty,
+ };
+ self.write_value(valty, dest)?;
}
NullaryOp(mir::NullOp::Box, ty) => {
match kind {
Unsize => {
let src = self.eval_operand(operand)?;
- let src_ty = self.operand_ty(operand);
- self.unsize_into(src, src_ty, dest, dest_ty)?;
+ self.unsize_into(src.value, src.ty, dest, dest_ty)?;
}
Misc => {
let src = self.eval_operand(operand)?;
- let src_ty = self.operand_ty(operand);
- if self.type_is_fat_ptr(src_ty) {
- match (src, self.type_is_fat_ptr(dest_ty)) {
+ if self.type_is_fat_ptr(src.ty) {
+ match (src.value, self.type_is_fat_ptr(dest_ty)) {
(Value::ByRef { .. }, _) |
(Value::ByValPair(..), true) => {
- self.write_value(src, dest, dest_ty)?;
+ let valty = ValTy {
+ value: src.value,
+ ty: dest_ty,
+ };
+ self.write_value(valty, dest)?;
}
(Value::ByValPair(data, _), false) => {
- self.write_value(Value::ByVal(data), dest, dest_ty)?;
+ let valty = ValTy {
+ value: Value::ByVal(data),
+ ty: dest_ty,
+ };
+ self.write_value(valty, dest)?;
}
(Value::ByVal(_), _) => bug!("expected fat ptr"),
}
} else {
- let src_val = self.value_to_primval(src, src_ty)?;
- let dest_val = self.cast_primval(src_val, src_ty, dest_ty)?;
- self.write_value(Value::ByVal(dest_val), dest, dest_ty)?;
+ let src_val = self.value_to_primval(src)?;
+ let dest_val = self.cast_primval(src_val, src.ty, dest_ty)?;
+ let valty = ValTy {
+ value: Value::ByVal(dest_val),
+ ty: dest_ty,
+ };
+ self.write_value(valty, dest)?;
}
}
ty::TyFnDef(def_id, substs) => {
let instance = resolve(self.tcx, def_id, substs);
let fn_ptr = self.memory.create_fn_alloc(instance);
- self.write_value(
- Value::ByVal(PrimVal::Ptr(fn_ptr)),
- dest,
- dest_ty,
- )?;
+ let valty = ValTy {
+ value: Value::ByVal(PrimVal::Ptr(fn_ptr)),
+ ty: dest_ty,
+ };
+ self.write_value(valty, dest)?;
}
ref other => bug!("reify fn pointer on {:?}", other),
}
UnsafeFnPointer => {
match dest_ty.sty {
ty::TyFnPtr(_) => {
- let src = self.eval_operand(operand)?;
- self.write_value(src, dest, dest_ty)?;
+ let mut src = self.eval_operand(operand)?;
+ src.ty = dest_ty;
+ self.write_value(src, dest)?;
}
ref other => bug!("fn to unsafe fn cast on {:?}", other),
}
ty::ClosureKind::FnOnce,
);
let fn_ptr = self.memory.create_fn_alloc(instance);
- self.write_value(
- Value::ByVal(PrimVal::Ptr(fn_ptr)),
- dest,
- dest_ty,
- )?;
+ let valty = ValTy {
+ value: Value::ByVal(PrimVal::Ptr(fn_ptr)),
+ ty: dest_ty,
+ };
+ self.write_value(valty, dest)?;
}
ref other => bug!("closure fn pointer on {:?}", other),
}
&mut self,
op: &mir::Operand<'tcx>,
) -> EvalResult<'tcx, PrimVal> {
- let value = self.eval_operand(op)?;
- let ty = self.operand_ty(op);
- self.value_to_primval(value, ty)
+ let valty = self.eval_operand(op)?;
+ self.value_to_primval(valty)
+ }
+
+ pub(crate) fn operands_to_args(
+ &mut self,
+ ops: &[mir::Operand<'tcx>],
+ ) -> EvalResult<'tcx, Vec<ValTy<'tcx>>> {
+ ops.into_iter()
+ .map(|op| self.eval_operand(op))
+ .collect()
}
- pub fn eval_operand(&mut self, op: &mir::Operand<'tcx>) -> EvalResult<'tcx, Value> {
+ pub fn eval_operand(&mut self, op: &mir::Operand<'tcx>) -> EvalResult<'tcx, ValTy<'tcx>> {
use rustc::mir::Operand::*;
match *op {
- Consume(ref lvalue) => self.eval_and_read_lvalue(lvalue),
+ Consume(ref lvalue) => {
+ Ok(ValTy {
+ value: self.eval_and_read_lvalue(lvalue)?,
+ ty: self.operand_ty(op),
+ })
+ },
Constant(ref constant) => {
use rustc::mir::Literal;
}
};
- Ok(value)
+ Ok(ValTy {
+ value,
+ ty: self.operand_ty(op),
+ })
}
}
}
}
}
- pub fn value_to_primval(&mut self, value: Value, ty: Ty<'tcx>) -> EvalResult<'tcx, PrimVal> {
+ pub fn value_to_primval(
+ &mut self,
+ ValTy { value, ty } : ValTy<'tcx>,
+ ) -> EvalResult<'tcx, PrimVal> {
match self.follow_by_ref_value(value, ty)? {
Value::ByRef { .. } => bug!("follow_by_ref_value can't result in `ByRef`"),
}
pub fn write_ptr(&mut self, dest: Lvalue, val: Pointer, dest_ty: Ty<'tcx>) -> EvalResult<'tcx> {
- self.write_value(val.to_value(), dest, dest_ty)
+ let valty = ValTy {
+ value: val.to_value(),
+ ty: dest_ty,
+ };
+ self.write_value(valty, dest)
}
pub fn write_primval(
val: PrimVal,
dest_ty: Ty<'tcx>,
) -> EvalResult<'tcx> {
- self.write_value(Value::ByVal(val), dest, dest_ty)
+ let valty = ValTy {
+ value: Value::ByVal(val),
+ ty: dest_ty,
+ };
+ self.write_value(valty, dest)
}
pub fn write_value(
&mut self,
- src_val: Value,
+ ValTy { value: src_val, ty: dest_ty } : ValTy<'tcx>,
dest: Lvalue,
- dest_ty: Ty<'tcx>,
) -> EvalResult<'tcx> {
//trace!("Writing {:?} to {:?} at type {:?}", src_val, dest, dest_ty);
// Note that it is really important that the type here is the right one, and matches the type things are read at.
(&ty::TyArray(_, length), &ty::TySlice(_)) => {
let ptr = src.into_ptr(&self.memory)?;
// u64 cast is from usize to u64, which is always good
- self.write_value(ptr.to_value_with_len(length as u64), dest, dest_ty)
+ let valty = ValTy {
+ value: ptr.to_value_with_len(length as u64),
+ ty: dest_ty,
+ };
+ self.write_value(valty, dest)
}
(&ty::TyDynamic(..), &ty::TyDynamic(..)) => {
// For now, upcasts are limited to changes in marker
// traits, and hence never actually require an actual
// change to the vtable.
- self.write_value(src, dest, dest_ty)
+ let valty = ValTy {
+ value: src,
+ ty: dest_ty,
+ };
+ self.write_value(valty, dest)
}
(_, &ty::TyDynamic(ref data, _)) => {
let trait_ref = data.principal().unwrap().with_self_ty(
let trait_ref = self.tcx.erase_regions(&trait_ref);
let vtable = self.get_vtable(src_pointee_ty, trait_ref)?;
let ptr = src.into_ptr(&self.memory)?;
- self.write_value(ptr.to_value_with_vtable(vtable), dest, dest_ty)
+ let valty = ValTy {
+ value: ptr.to_value_with_vtable(vtable),
+ ty: dest_ty,
+ };
+ self.write_value(valty, dest)
}
_ => bug!("invalid unsizing {:?} -> {:?}", src_ty, dest_ty),
}
Index(ref operand) => {
- // FIXME(solson)
let n_ptr = self.eval_operand(operand)?;
- let usize = self.tcx.types.usize;
- let n = self.value_to_primval(n_ptr, usize)?.to_u64()?;
+ let n = self.value_to_primval(n_ptr)?.to_u64()?;
return self.lvalue_index(base, base_ty, n);
}
//! This separation exists to ensure that no fancy miri features like
//! interpreting common C functions leak into CTFE.
-use super::{EvalResult, EvalContext, Lvalue, PrimVal};
+use super::{EvalResult, EvalContext, Lvalue, PrimVal, ValTy};
use rustc::{mir, ty};
use syntax::codemap::Span;
ecx: &mut EvalContext<'a, 'tcx, Self>,
instance: ty::Instance<'tcx>,
destination: Option<(Lvalue, mir::BasicBlock)>,
- arg_operands: &[mir::Operand<'tcx>],
+ args: &[ValTy<'tcx>],
span: Span,
sig: ty::FnSig<'tcx>,
) -> EvalResult<'tcx, bool>;
fn call_intrinsic<'a>(
ecx: &mut EvalContext<'a, 'tcx, Self>,
instance: ty::Instance<'tcx>,
- args: &[mir::Operand<'tcx>],
+ args: &[ValTy<'tcx>],
dest: Lvalue,
dest_ty: ty::Ty<'tcx>,
dest_layout: &'tcx ty::layout::Layout,
pub use self::error::{EvalError, EvalResult, EvalErrorKind};
pub use self::eval_context::{EvalContext, Frame, ResourceLimits, StackPopCleanup, DynamicLifetime,
- TyAndPacked, PtrAndAlign};
+ TyAndPacked, PtrAndAlign, ValTy};
pub use self::lvalue::{Lvalue, LvalueExtra, GlobalId};
use rustc::mir;
use rustc::ty::Ty;
-use super::{EvalResult, EvalContext, Lvalue, Machine};
+use super::{EvalResult, EvalContext, Lvalue, Machine, ValTy};
use super::value::{PrimVal, PrimValKind, Value, bytes_to_f32, bytes_to_f64, f32_to_bytes,
f64_to_bytes};
fn binop_with_overflow(
&mut self,
op: mir::BinOp,
- left: &mir::Operand<'tcx>,
- right: &mir::Operand<'tcx>,
+ left: ValTy<'tcx>,
+ right: ValTy<'tcx>,
) -> EvalResult<'tcx, (PrimVal, bool)> {
- let left_ty = self.operand_ty(left);
- let right_ty = self.operand_ty(right);
- let left_val = self.eval_operand_to_primval(left)?;
- let right_val = self.eval_operand_to_primval(right)?;
- self.binary_op(op, left_val, left_ty, right_val, right_ty)
+ // Operands now arrive pre-evaluated as `ValTy` (a value paired with its
+ // type — see the `ValTy { value, ty }` construction elsewhere in this
+ // patch), so the separate `operand_ty` lookups and operand evaluation
+ // removed above are no longer needed here.
+ let left_val = self.value_to_primval(left)?;
+ let right_val = self.value_to_primval(right)?;
+ self.binary_op(op, left_val, left.ty, right_val, right.ty)
}
/// Applies the binary operation `op` to the two operands and writes a tuple of the result
pub fn intrinsic_with_overflow(
&mut self,
op: mir::BinOp,
- left: &mir::Operand<'tcx>,
- right: &mir::Operand<'tcx>,
+ left: ValTy<'tcx>,
+ right: ValTy<'tcx>,
dest: Lvalue,
dest_ty: Ty<'tcx>,
) -> EvalResult<'tcx> {
let (val, overflowed) = self.binop_with_overflow(op, left, right)?;
let val = Value::ByValPair(val, PrimVal::from_bool(overflowed));
- self.write_value(val, dest, dest_ty)
+ // `write_value` now takes a `ValTy` that pairs the (result, overflow-flag)
+ // pair with the destination type, instead of receiving the type as a
+ // separate third argument.
+ let valty = ValTy {
+ value: val,
+ ty: dest_ty,
+ };
+ self.write_value(valty, dest)
}
/// Applies the binary operation `op` to the arguments and writes the result to the
pub fn intrinsic_overflowing(
&mut self,
op: mir::BinOp,
- left: &mir::Operand<'tcx>,
- right: &mir::Operand<'tcx>,
+ left: ValTy<'tcx>,
+ right: ValTy<'tcx>,
dest: Lvalue,
dest_ty: Ty<'tcx>,
) -> EvalResult<'tcx, bool> {
use syntax::codemap::Span;
use interpret::{EvalResult, EvalContext, StackPopCleanup, Lvalue, LvalueExtra, PrimVal, Value,
- Machine};
+ Machine, ValTy};
impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
pub(crate) fn drop_lvalue(
let arg_local = arg_locals.next().unwrap();
let dest = self.eval_lvalue(&mir::Lvalue::Local(arg_local))?;
let arg_ty = self.tcx.mk_mut_ptr(ty);
- self.write_value(arg, dest, arg_ty)
+ let valty = ValTy {
+ value: arg,
+ ty: arg_ty,
+ };
+ self.write_value(valty, dest)
}
}
use syntax::abi::Abi;
use super::{EvalError, EvalResult, EvalErrorKind, EvalContext, eval_context, TyAndPacked,
- PtrAndAlign, Lvalue, MemoryPointer, PrimVal, Value, Machine, HasMemory};
+ PtrAndAlign, Lvalue, MemoryPointer, PrimVal, Value, Machine, HasMemory, ValTy};
use super::eval_context::IntegerExt;
use rustc_data_structures::indexed_vec::Idx;
} => {
// FIXME(CTFE): forbid branching
let discr_val = self.eval_operand(discr)?;
- let discr_ty = self.operand_ty(discr);
- let discr_prim = self.value_to_primval(discr_val, discr_ty)?;
+ let discr_prim = self.value_to_primval(discr_val)?;
// Branch to the `otherwise` case by default, if no match is found.
let mut target_block = targets[targets.len() - 1];
return err!(Unimplemented(msg));
}
};
+ let args = self.operands_to_args(args)?;
let sig = self.erase_lifetimes(&sig);
self.eval_fn_call(
fn_def,
destination,
- args,
+ &args,
terminator.source_info.span,
sig,
)?;
&mut self,
instance: ty::Instance<'tcx>,
destination: Option<(Lvalue, mir::BasicBlock)>,
- arg_operands: &[mir::Operand<'tcx>],
+ args: &[ValTy<'tcx>],
span: Span,
sig: ty::FnSig<'tcx>,
) -> EvalResult<'tcx> {
return err!(Unreachable);
}
let layout = self.type_layout(ty)?;
- M::call_intrinsic(self, instance, arg_operands, ret, ty, layout, target)?;
+ M::call_intrinsic(self, instance, args, ret, ty, layout, target)?;
self.dump_local(ret);
Ok(())
}
// FIXME: figure out why we can't just go through the shim
ty::InstanceDef::ClosureOnceShim { .. } => {
- let mut args = Vec::new();
- for arg in arg_operands {
- let arg_val = self.eval_operand(arg)?;
- let arg_ty = self.operand_ty(arg);
- args.push((arg_val, arg_ty));
- }
- if M::eval_fn_call(self, instance, destination, arg_operands, span, sig)? {
+ if M::eval_fn_call(self, instance, destination, args, span, sig)? {
return Ok(());
}
let mut arg_locals = self.frame().mir.args_iter();
match sig.abi {
// closure as closure once
Abi::RustCall => {
- for (arg_local, (arg_val, arg_ty)) in arg_locals.zip(args) {
+ for (arg_local, &valty) in arg_locals.zip(args) {
let dest = self.eval_lvalue(&mir::Lvalue::Local(arg_local))?;
- self.write_value(arg_val, dest, arg_ty)?;
+ self.write_value(valty, dest)?;
}
}
// non capture closure as fn ptr
"arg_locals: {:?}",
self.frame().mir.args_iter().collect::<Vec<_>>()
);
- trace!("arg_operands: {:?}", arg_operands);
+ trace!("args: {:?}", args);
let local = arg_locals.nth(1).unwrap();
- for (i, (arg_val, arg_ty)) in args.into_iter().enumerate() {
+ for (i, &valty) in args.into_iter().enumerate() {
let dest = self.eval_lvalue(&mir::Lvalue::Local(local).field(
mir::Field::new(i),
- arg_ty,
+ valty.ty,
))?;
- self.write_value(arg_val, dest, arg_ty)?;
+ self.write_value(valty, dest)?;
}
}
_ => bug!("bad ABI for ClosureOnceShim: {:?}", sig.abi),
ty::InstanceDef::DropGlue(..) |
ty::InstanceDef::CloneShim(..) |
ty::InstanceDef::Item(_) => {
- let mut args = Vec::new();
- for arg in arg_operands {
- let arg_val = self.eval_operand(arg)?;
- let arg_ty = self.operand_ty(arg);
- args.push((arg_val, arg_ty));
- }
-
// Push the stack frame, and potentially be entirely done if the call got hooked
- if M::eval_fn_call(self, instance, destination, arg_operands, span, sig)? {
+ if M::eval_fn_call(self, instance, destination, args, span, sig)? {
return Ok(());
}
"arg_locals: {:?}",
self.frame().mir.args_iter().collect::<Vec<_>>()
);
- trace!("arg_operands: {:?}", arg_operands);
+ trace!("args: {:?}", args);
match sig.abi {
Abi::RustCall => {
assert_eq!(args.len(), 2);
// write first argument
let first_local = arg_locals.next().unwrap();
let dest = self.eval_lvalue(&mir::Lvalue::Local(first_local))?;
- let (arg_val, arg_ty) = args.remove(0);
- self.write_value(arg_val, dest, arg_ty)?;
+ self.write_value(args[0], dest)?;
}
// unpack and write all other args
- let (arg_val, arg_ty) = args.remove(0);
- let layout = self.type_layout(arg_ty)?;
+ let layout = self.type_layout(args[1].ty)?;
if let (&ty::TyTuple(fields, _),
- &Layout::Univariant { ref variant, .. }) = (&arg_ty.sty, layout)
+ &Layout::Univariant { ref variant, .. }) = (&args[1].ty.sty, layout)
{
trace!("fields: {:?}", fields);
if self.frame().mir.args_iter().count() == fields.len() + 1 {
let offsets = variant.offsets.iter().map(|s| s.bytes());
- match arg_val {
+ match args[1].value {
Value::ByRef(PtrAndAlign { ptr, aligned }) => {
assert!(
aligned,
dest,
ty
);
- self.write_value(arg, dest, ty)?;
+ let valty = ValTy {
+ value: arg,
+ ty,
+ };
+ self.write_value(valty, dest)?;
}
}
Value::ByVal(PrimVal::Undef) => {}
let dest = self.eval_lvalue(&mir::Lvalue::Local(
arg_locals.next().unwrap(),
))?;
- self.write_value(other, dest, fields[0])?;
+ let valty = ValTy {
+ value: other,
+ ty: fields[0],
+ };
+ self.write_value(valty, dest)?;
}
}
} else {
let dest = self.eval_lvalue(
&mir::Lvalue::Local(arg_locals.next().unwrap()),
)?;
- self.write_value(arg_val, dest, arg_ty)?;
+ self.write_value(args[1], dest)?;
}
} else {
bug!(
- "rust-call ABI tuple argument was {:?}, {:?}",
- arg_ty,
+ "rust-call ABI tuple argument was {:#?}, {:#?}",
+ args[1].ty,
layout
);
}
}
_ => {
- for (arg_local, (arg_val, arg_ty)) in arg_locals.zip(args) {
+ for (arg_local, &valty) in arg_locals.zip(args) {
let dest = self.eval_lvalue(&mir::Lvalue::Local(arg_local))?;
- self.write_value(arg_val, dest, arg_ty)?;
+ self.write_value(valty, dest)?;
}
}
}
// cannot use the shim here, because that will only result in infinite recursion
ty::InstanceDef::Virtual(_, idx) => {
let ptr_size = self.memory.pointer_size();
- let (_, vtable) = self.eval_operand(&arg_operands[0])?.into_ptr_vtable_pair(
- &self.memory,
- )?;
+ let (ptr, vtable) = args[0].into_ptr_vtable_pair(&self.memory)?;
let fn_ptr = self.memory.read_ptr(
vtable.offset(ptr_size * (idx as u64 + 3), &self)?,
)?;
let instance = self.memory.get_fn(fn_ptr.to_ptr()?)?;
- let mut arg_operands = arg_operands.to_vec();
- let ty = self.operand_ty(&arg_operands[0]);
- let ty = self.get_field_ty(ty, 0)?.ty; // TODO: packed flag is ignored
- match arg_operands[0] {
- mir::Operand::Consume(ref mut lval) => {
- *lval = lval.clone().field(mir::Field::new(0), ty)
- }
- _ => bug!("virtual call first arg cannot be a constant"),
- }
+ let mut args = args.to_vec();
+ let ty = self.get_field_ty(args[0].ty, 0)?.ty; // TODO: packed flag is ignored
+ args[0].ty = ty;
+ args[0].value = ptr.to_value();
// recurse with concrete function
- self.eval_fn_call(instance, destination, &arg_operands, span, sig)
+ self.eval_fn_call(instance, destination, &args, span, sig)
}
}
}