1 use rustc::middle::const_val;
2 use rustc::hir::def_id::DefId;
3 use rustc::mir::mir_map::MirMap;
4 use rustc::mir::repr as mir;
5 use rustc::traits::Reveal;
6 use rustc::ty::layout::{self, Layout, Size};
7 use rustc::ty::subst::{self, Subst, Substs};
8 use rustc::ty::{self, Ty, TyCtxt};
9 use rustc::util::nodemap::DefIdMap;
10 use rustc_data_structures::indexed_vec::Idx;
11 use std::cell::RefCell;
15 use syntax::codemap::{self, DUMMY_SP};
17 use error::{EvalError, EvalResult};
18 use memory::{Memory, Pointer};
19 use primval::{self, PrimVal};
21 use std::collections::HashMap;
/// Central interpreter state: the type context, sources of MIR, the
/// simulated memory, cached global allocations, and the call stack.
/// NOTE(review): some lines of this struct are elided in this excerpt
/// (the `stack_limit` field declaration and the closing brace are not shown).
26 pub struct EvalContext<'a, 'tcx: 'a> {
27 /// The results of the type checker, from rustc.
28 tcx: TyCtxt<'a, 'tcx, 'tcx>,
30 /// A mapping from NodeIds to Mir, from rustc. Only contains MIR for crate-local items.
31 mir_map: &'a MirMap<'tcx>,
33 /// A local cache from DefIds to Mir for non-crate-local items.
34 mir_cache: RefCell<DefIdMap<Rc<mir::Mir<'tcx>>>>,
36 /// The virtual memory system.
37 memory: Memory<'a, 'tcx>,
39 /// Precomputed statics, constants and promoteds.
// Values are pointers into `memory`; looked up by `eval_operand`/`eval_lvalue`.
40 statics: HashMap<ConstantId<'tcx>, Pointer>,
42 /// The virtual call stack.
43 stack: Vec<Frame<'a, 'tcx>>,
45 /// The maximum number of stack frames allowed
/// A single entry in the interpreter's virtual call stack.
/// NOTE(review): several lines are elided in this excerpt (e.g. the
/// `def_id` field body after its doc comment, and the `stmt` field).
50 pub struct Frame<'a, 'tcx: 'a> {
51 ////////////////////////////////////////////////////////////////////////////////
52 // Function and callsite information
53 ////////////////////////////////////////////////////////////////////////////////
55 /// The MIR for the function called on this frame.
56 pub mir: CachedMir<'a, 'tcx>,
58 /// The def_id of the current function.
61 /// type substitutions for the current function invocation.
62 pub substs: &'tcx Substs<'tcx>,
64 /// The span of the call site.
65 pub span: codemap::Span,
67 ////////////////////////////////////////////////////////////////////////////////
68 // Return pointer and local allocations
69 ////////////////////////////////////////////////////////////////////////////////
71 /// A pointer for writing the return value of the current call if it's not a diverging call.
72 pub return_ptr: Option<Pointer>,
74 /// The list of locals for the current function, stored in order as
75 /// `[arguments..., variables..., temporaries...]`. The variables begin at `self.var_offset`
76 /// and the temporaries at `self.temp_offset`.
77 pub locals: Vec<Pointer>,
79 /// The offset of the first variable in `self.locals`.
80 pub var_offset: usize,
82 /// The offset of the first temporary in `self.locals`.
83 pub temp_offset: usize,
85 ////////////////////////////////////////////////////////////////////////////////
86 // Current position within the function
87 ////////////////////////////////////////////////////////////////////////////////
89 /// The block that is currently executed (or will be executed after the above call stacks
91 pub block: mir::BasicBlock,
93 /// The index of the currently evaluated statment.
// NOTE(review): the item bodies here are largely elided. The first derive
// presumably precedes the `Lvalue { ptr, extra }` struct (used by
// `eval_lvalue`), the second the `LvalueExtra` enum whose `None` and
// `Length(u64)` variants are referenced elsewhere in this file — confirm
// against the full source.
97 #[derive(Copy, Clone, Debug, Eq, PartialEq)]
103 #[derive(Copy, Clone, Debug, Eq, PartialEq)]
107 // TODO(solson): Vtable(memory::AllocId),
// Marks an lvalue as pointing into a specific enum variant's data
// (set by the `Downcast` projection in `eval_lvalue`).
108 DowncastVariant(usize),
/// MIR for a function: either borrowed from the crate-local `MirMap`
/// (`Ref`) or an owned, reference-counted copy decoded from another
/// crate's metadata (`Owned`) — see `load_mir`.
112 pub enum CachedMir<'mir, 'tcx: 'mir> {
113 Ref(&'mir mir::Mir<'tcx>),
114 Owned(Rc<mir::Mir<'tcx>>)
117 #[derive(Clone, Debug, Eq, PartialEq, Hash)]
118 /// Uniquely identifies a specific constant or static
// Used as the key of `EvalContext::statics`.
// NOTE(review): the `def_id` field line and the `kind` field are elided
// in this excerpt (a `kind: ConstantKind` field is implied by the
// construction sites in `eval_operand`/`eval_lvalue`).
119 struct ConstantId<'tcx> {
120 /// the def id of the constant/static or in case of promoteds, the def id of the function they belong to
122 /// In case of statics and constants this is `Substs::empty()`, so only promoteds and associated
123 /// constants actually have something useful here. We could special case statics and constants,
124 /// but that would only require more branching when working with constants, and not bring any
126 substs: &'tcx Substs<'tcx>,
130 #[derive(Clone, Debug, Eq, PartialEq, Hash)]
// NOTE(review): the `enum ConstantKind` header line is elided in this
// excerpt; construction sites elsewhere in the file use
// `ConstantKind::Promoted(index)` and `ConstantKind::Global`.
132 Promoted(mir::Promoted),
133 /// Statics, constants and associated constants
137 impl<'a, 'tcx> EvalContext<'a, 'tcx> {
/// Creates a fresh interpreter with empty caches, an empty call stack,
/// and a memory system bounded by `memory_size`; `stack_limit` caps the
/// number of stack frames (checked in `push_stack_frame`).
// NOTE(review): the struct-literal opening and the `tcx`/`mir_map`/`stack`
// initializers are elided in this excerpt.
138 pub fn new(tcx: TyCtxt<'a, 'tcx, 'tcx>, mir_map: &'a MirMap<'tcx>, memory_size: usize, stack_limit: usize) -> Self {
142 mir_cache: RefCell::new(DefIdMap()),
143 memory: Memory::new(&tcx.data_layout, memory_size),
144 statics: HashMap::new(),
146 stack_limit: stack_limit,
/// Allocates virtual memory sized and aligned for `ty` (monomorphized
/// with `substs`), suitable for holding a call's return value.
/// Errors are propagated from the memory allocator.
150 pub fn alloc_ret_ptr(&mut self, ty: Ty<'tcx>, substs: &'tcx Substs<'tcx>) -> EvalResult<'tcx, Pointer> {
151 let size = self.type_size_with_substs(ty, substs);
152 let align = self.type_align_with_substs(ty, substs);
153 self.memory.allocate(size, align)
/// Shared access to the interpreter's virtual memory.
156 pub fn memory(&self) -> &Memory<'a, 'tcx> {
/// Mutable access to the interpreter's virtual memory.
160 pub fn memory_mut(&mut self) -> &mut Memory<'a, 'tcx> {
/// Read-only view of the current call stack (bottom to top).
164 pub fn stack(&self) -> &[Frame<'a, 'tcx>] {
168 // TODO(solson): Try making const_to_primval instead.
/// Materializes a rustc `ConstVal` into a fresh allocation in the
/// interpreter's memory and returns a pointer to it.
/// NOTE(review): the macro name/header, the `match` header, and arm
/// closers (including the `Ok(ptr)` returns) are elided in this excerpt.
169 fn const_to_ptr(&mut self, const_val: &const_val::ConstVal) -> EvalResult<'tcx, Pointer> {
170 use rustc::middle::const_val::ConstVal::*;
171 use rustc_const_math::{ConstInt, ConstIsize, ConstUsize, ConstFloat};
// Helper macro body: allocate an $n-byte, $n-aligned cell and store the
// integer $i into it (sign-extended through i64).
173 ($i:ident, $n:expr) => {{
174 let ptr = self.memory.allocate($n, $n)?;
175 self.memory.write_int(ptr, $i as i64, $n)?;
180 Float(ConstFloat::F32(f)) => {
181 let ptr = self.memory.allocate(4, 4)?;
182 self.memory.write_f32(ptr, f)?;
185 Float(ConstFloat::F64(f)) => {
186 let ptr = self.memory.allocate(8, 8)?;
187 self.memory.write_f64(ptr, f)?;
// Inference placeholders must be resolved before const evaluation.
190 Float(ConstFloat::FInfer{..}) => unreachable!(),
191 Integral(ConstInt::Infer(_)) => unreachable!(),
192 Integral(ConstInt::InferSigned(_)) => unreachable!(),
// isize/usize constants share arms with the fixed-width type of the
// same byte width for the target.
193 Integral(ConstInt::I8(i)) => i2p!(i, 1),
194 Integral(ConstInt::U8(i)) => i2p!(i, 1),
195 Integral(ConstInt::Isize(ConstIsize::Is16(i))) |
196 Integral(ConstInt::I16(i)) => i2p!(i, 2),
197 Integral(ConstInt::Usize(ConstUsize::Us16(i))) |
198 Integral(ConstInt::U16(i)) => i2p!(i, 2),
199 Integral(ConstInt::Isize(ConstIsize::Is32(i))) |
200 Integral(ConstInt::I32(i)) => i2p!(i, 4),
201 Integral(ConstInt::Usize(ConstUsize::Us32(i))) |
202 Integral(ConstInt::U32(i)) => i2p!(i, 4),
203 Integral(ConstInt::Isize(ConstIsize::Is64(i))) |
204 Integral(ConstInt::I64(i)) => i2p!(i, 8),
205 Integral(ConstInt::Usize(ConstUsize::Us64(i))) |
206 Integral(ConstInt::U64(i)) => i2p!(i, 8),
// String constant: backing bytes in one allocation, plus a fat pointer
// (data ptr + length) in a second, pointer-aligned allocation.
208 let psize = self.memory.pointer_size();
209 let static_ptr = self.memory.allocate(s.len(), 1)?;
210 let ptr = self.memory.allocate(psize * 2, psize)?;
211 self.memory.write_bytes(static_ptr, s.as_bytes())?;
212 self.memory.write_ptr(ptr, static_ptr)?;
213 self.memory.write_usize(ptr.offset(psize as isize), s.len() as u64)?;
// Byte-string constant: like the string case, but only a thin pointer
// cell is written here (no length field in this excerpt).
217 let psize = self.memory.pointer_size();
218 let static_ptr = self.memory.allocate(bs.len(), 1)?;
219 let ptr = self.memory.allocate(psize, psize)?;
220 self.memory.write_bytes(static_ptr, bs)?;
221 self.memory.write_ptr(ptr, static_ptr)?;
// Bool: single byte.
225 let ptr = self.memory.allocate(1, 1)?;
226 self.memory.write_bool(ptr, b)?;
// Char: stored as its 4-byte scalar value.
230 let ptr = self.memory.allocate(4, 4)?;
231 self.memory.write_uint(ptr, c as u64, 4)?;
234 Struct(_node_id) => unimplemented!(),
235 Tuple(_node_id) => unimplemented!(),
236 Function(_def_id) => unimplemented!(),
237 Array(_, _) => unimplemented!(),
238 Repeat(_, _) => unimplemented!(),
239 Dummy => unimplemented!(),
/// Whether `ty` is `Sized` in an empty parameter environment; used to
/// distinguish thin from fat pointers (e.g. in `read_primval` and casts).
243 fn type_is_sized(&self, ty: Ty<'tcx>) -> bool {
244 ty.is_sized(self.tcx, &self.tcx.empty_parameter_environment(), DUMMY_SP)
/// Fetches the MIR for `def_id`: crate-local items are borrowed straight
/// from the `MirMap`; external items are decoded from crate metadata once
/// and memoized in `mir_cache` behind an `Rc`.
/// Panics if no MIR exists for the item.
247 pub fn load_mir(&self, def_id: DefId) -> CachedMir<'a, 'tcx> {
248 if def_id.is_local() {
249 CachedMir::Ref(self.mir_map.map.get(&def_id).unwrap())
// Fast path: already decoded this external item before.
251 let mut mir_cache = self.mir_cache.borrow_mut();
252 if let Some(mir) = mir_cache.get(&def_id) {
253 return CachedMir::Owned(mir.clone());
// Slow path: pull the MIR out of the crate store.
256 let cs = &self.tcx.sess.cstore;
257 let mir = cs.maybe_get_item_mir(self.tcx, def_id).unwrap_or_else(|| {
258 panic!("no mir for `{}`", self.tcx.item_path_str(def_id));
260 let cached = Rc::new(mir);
261 mir_cache.insert(def_id, cached.clone());
262 CachedMir::Owned(cached)
/// Applies `substs` to `ty` and normalizes any associated types,
/// yielding a fully concrete type for the current invocation.
266 pub fn monomorphize(&self, ty: Ty<'tcx>, substs: &'tcx Substs<'tcx>) -> Ty<'tcx> {
267 let substituted = ty.subst(self.tcx, substs);
268 self.tcx.normalize_associated_type(&substituted)
/// Size in bytes of `ty` under the current frame's substitutions.
271 fn type_size(&self, ty: Ty<'tcx>) -> usize {
272 self.type_size_with_substs(ty, self.substs())
/// ABI alignment in bytes of `ty` under the current frame's substitutions.
275 fn type_align(&self, ty: Ty<'tcx>) -> usize {
276 self.type_align_with_substs(ty, self.substs())
/// Size in bytes of `ty` after substituting `substs`, per the target data layout.
279 fn type_size_with_substs(&self, ty: Ty<'tcx>, substs: &'tcx Substs<'tcx>) -> usize {
280 self.type_layout_with_substs(ty, substs).size(&self.tcx.data_layout).bytes() as usize
/// ABI alignment in bytes of `ty` after substituting `substs`.
283 fn type_align_with_substs(&self, ty: Ty<'tcx>, substs: &'tcx Substs<'tcx>) -> usize {
284 self.type_layout_with_substs(ty, substs).align(&self.tcx.data_layout).abi() as usize
/// Layout of `ty` under the current frame's substitutions.
287 fn type_layout(&self, ty: Ty<'tcx>) -> &'tcx Layout {
288 self.type_layout_with_substs(ty, self.substs())
/// Computes the `Layout` of `ty` after monomorphizing with `substs`,
/// using a fresh normalizing inference context each call.
291 fn type_layout_with_substs(&self, ty: Ty<'tcx>, substs: &'tcx Substs<'tcx>) -> &'tcx Layout {
292 // TODO(solson): Is this inefficient? Needs investigation.
293 let ty = self.monomorphize(ty, substs);
295 self.tcx.normalizing_infer_ctxt(Reveal::All).enter(|infcx| {
296 // TODO(solson): Report this error properly.
297 ty.layout(&infcx).unwrap()
/// Pushes a new call frame: allocates one memory cell per local
/// (arguments, then variables, then temporaries) and records where the
/// return value should be written. Fails with `StackFrameLimitReached`
/// once the configured stack limit is exceeded.
/// NOTE(review): several lines are elided in this excerpt (the `def_id`
/// and `span` parameters, the `collect()` of `locals`, and parts of the
/// `Frame` literal).
301 pub fn push_stack_frame(
305 mir: CachedMir<'a, 'tcx>,
306 substs: &'tcx Substs<'tcx>,
307 return_ptr: Option<Pointer>,
308 ) -> EvalResult<'tcx, ()> {
309 let arg_tys = mir.arg_decls.iter().map(|a| a.ty);
310 let var_tys = mir.var_decls.iter().map(|v| v.ty);
311 let temp_tys = mir.temp_decls.iter().map(|t| t.ty);
313 let num_args = mir.arg_decls.len();
314 let num_vars = mir.var_decls.len();
// Trace-output indentation tracks stack depth (undone in pop_stack_frame).
316 ::log_settings::settings().indentation += 1;
// One allocation per local, in [args..., vars..., temps...] order —
// this ordering is what var_offset/temp_offset below index into.
318 let locals: EvalResult<'tcx, Vec<Pointer>> = arg_tys.chain(var_tys).chain(temp_tys).map(|ty| {
319 let size = self.type_size_with_substs(ty, substs);
320 let align = self.type_align_with_substs(ty, substs);
321 self.memory.allocate(size, align)
324 self.stack.push(Frame {
326 block: mir::START_BLOCK,
327 return_ptr: return_ptr,
329 var_offset: num_args,
330 temp_offset: num_args + num_vars,
// Limit check happens after the push; the frame is already on the stack
// when the error is returned.
336 if self.stack.len() > self.stack_limit {
337 Err(EvalError::StackFrameLimitReached)
/// Pops the top call frame and unwinds the trace indentation.
/// Panics if the stack is empty.
343 fn pop_stack_frame(&mut self) {
344 ::log_settings::settings().indentation -= 1;
345 let _frame = self.stack.pop().expect("tried to pop a stack frame, but there were none");
346 // TODO(solson): Deallocate local variables.
349 /// Applies the binary operation `op` to the two operands and writes a tuple of the result
350 /// and a boolean signifying the potential overflow to the destination.
// NOTE(review): the `op` and `dest` parameter lines are elided in this
// excerpt; `dest` is used below as the tuple's base pointer.
351 fn intrinsic_with_overflow(
354 left: &mir::Operand<'tcx>,
355 right: &mir::Operand<'tcx>,
357 dest_layout: &'tcx Layout,
358 ) -> EvalResult<'tcx, ()> {
359 use rustc::ty::layout::Layout::*;
// The destination `(T, bool)` tuple must have a Univariant layout; the
// struct layout gives us the offset of the bool flag (field 1).
360 let tup_layout = match *dest_layout {
361 Univariant { ref variant, .. } => variant,
362 _ => panic!("checked bin op returns something other than a tuple"),
// Field 0 (the value) is written by intrinsic_overflowing; the overflow
// flag goes into field 1.
365 let overflowed = self.intrinsic_overflowing(op, left, right, dest)?;
366 let offset = tup_layout.field_offset(1).bytes() as isize;
367 self.memory.write_bool(dest.offset(offset), overflowed)
370 /// Applies the binary operation `op` to the arguments and writes the result to the destination.
371 /// Returns `true` if the operation overflowed.
// NOTE(review): the `op`/`dest` parameter lines and the final
// `Ok(overflow)` return are elided in this excerpt.
372 fn intrinsic_overflowing(
375 left: &mir::Operand<'tcx>,
376 right: &mir::Operand<'tcx>,
378 ) -> EvalResult<'tcx, bool> {
// Evaluate each operand to a pointer, then load it as a primitive value.
379 let left_ptr = self.eval_operand(left)?;
380 let left_ty = self.operand_ty(left);
381 let left_val = self.read_primval(left_ptr, left_ty)?;
383 let right_ptr = self.eval_operand(right)?;
384 let right_ty = self.operand_ty(right);
385 let right_val = self.read_primval(right_ptr, right_ty)?;
// Delegate the arithmetic (and overflow detection) to primval.
387 let (val, overflow) = primval::binary_op(op, left_val, right_val)?;
388 self.memory.write_primval(dest, val)?;
/// Moves each operand into `dest` at the corresponding byte offset —
/// the workhorse for aggregate (struct/tuple/array/enum-variant) assignment.
/// `offsets` and `operands` are zipped pairwise, so extra offsets are ignored.
/// NOTE(review): the `dest`/`offsets` parameter lines and the final `Ok(())`
/// are elided in this excerpt.
392 fn assign_fields<I: IntoIterator<Item = u64>>(
396 operands: &[mir::Operand<'tcx>],
397 ) -> EvalResult<'tcx, ()> {
398 for (offset, operand) in offsets.into_iter().zip(operands) {
399 let src = self.eval_operand(operand)?;
400 let src_ty = self.operand_ty(operand);
401 let field_dest = dest.offset(offset as isize);
402 self.move_(src, field_dest, src_ty)?;
/// Evaluates `rvalue` and stores the result into `lvalue` — the core of
/// MIR statement execution. Dispatches on the rvalue kind and, for
/// aggregates, on the destination type's `Layout`.
/// NOTE(review): this excerpt elides many closing braces, match headers
/// and a few arms (e.g. the `Array`, `Len`, `Box` and `Misc`-cast
/// headers); comments below are based on the visible bodies.
407 fn eval_assignment(&mut self, lvalue: &mir::Lvalue<'tcx>, rvalue: &mir::Rvalue<'tcx>)
408 -> EvalResult<'tcx, ()>
410 let dest = self.eval_lvalue(lvalue)?.to_ptr();
411 let dest_ty = self.lvalue_ty(lvalue);
412 let dest_layout = self.type_layout(dest_ty);
414 use rustc::mir::repr::Rvalue::*;
// Plain copy of an operand into the destination.
416 Use(ref operand) => {
417 let src = self.eval_operand(operand)?;
418 self.move_(src, dest, dest_ty)?;
421 BinaryOp(bin_op, ref left, ref right) => {
422 // ignore overflow bit, rustc inserts check branches for us
423 self.intrinsic_overflowing(bin_op, left, right, dest)?;
426 CheckedBinaryOp(bin_op, ref left, ref right) => {
427 self.intrinsic_with_overflow(bin_op, left, right, dest, dest_layout)?;
430 UnaryOp(un_op, ref operand) => {
431 let ptr = self.eval_operand(operand)?;
432 let ty = self.operand_ty(operand);
433 let val = self.read_primval(ptr, ty)?;
434 self.memory.write_primval(dest, primval::unary_op(un_op, val)?)?;
// Aggregates: field placement is driven entirely by the layout.
437 Aggregate(ref kind, ref operands) => {
438 use rustc::ty::layout::Layout::*;
// Single-variant struct/tuple: offsets derived from offset_after_field.
440 Univariant { ref variant, .. } => {
441 let offsets = iter::once(0)
442 .chain(variant.offset_after_field.iter().map(|s| s.bytes()));
443 self.assign_fields(dest, offsets, operands)?;
// Array aggregate: elements at i * elem_size.
447 let elem_size = match dest_ty.sty {
448 ty::TyArray(elem_ty, _) => self.type_size(elem_ty) as u64,
449 _ => panic!("tried to assign {:?} to non-array type {:?}",
452 let offsets = (0..).map(|i| i * elem_size);
453 self.assign_fields(dest, offsets, operands)?;
// Multi-variant enum: write the discriminant first, then the chosen
// variant's fields.
456 General { discr, ref variants, .. } => {
457 if let mir::AggregateKind::Adt(adt_def, variant, _) = *kind {
458 let discr_val = adt_def.variants[variant].disr_val.to_u64_unchecked();
459 let discr_size = discr.size().bytes() as usize;
460 self.memory.write_uint(dest, discr_val, discr_size)?;
462 let offsets = variants[variant].offset_after_field.iter()
464 self.assign_fields(dest, offsets, operands)?;
466 panic!("tried to assign {:?} to Layout::General", kind);
// Option-like enum with a niche: the non-null variant stores its single
// payload; the null variant is encoded as a zero pointer-sized write.
470 RawNullablePointer { nndiscr, .. } => {
471 if let mir::AggregateKind::Adt(_, variant, _) = *kind {
472 if nndiscr == variant as u64 {
473 assert_eq!(operands.len(), 1);
474 let operand = &operands[0];
475 let src = self.eval_operand(operand)?;
476 let src_ty = self.operand_ty(operand);
477 self.move_(src, dest, src_ty)?;
479 assert_eq!(operands.len(), 0);
480 self.memory.write_isize(dest, 0)?;
483 panic!("tried to assign {:?} to Layout::RawNullablePointer", kind);
// Like RawNullablePointer, but the niche lives inside a struct field
// located via `discrfield` (see nonnull_offset).
487 StructWrappedNullablePointer { nndiscr, ref nonnull, ref discrfield } => {
488 if let mir::AggregateKind::Adt(_, variant, _) = *kind {
489 if nndiscr == variant as u64 {
490 let offsets = iter::once(0)
491 .chain(nonnull.offset_after_field.iter().map(|s| s.bytes()));
492 try!(self.assign_fields(dest, offsets, operands));
494 assert_eq!(operands.len(), 0);
495 let offset = self.nonnull_offset(dest_ty, nndiscr, discrfield)?;
496 let dest = dest.offset(offset.bytes() as isize);
497 try!(self.memory.write_isize(dest, 0));
500 panic!("tried to assign {:?} to Layout::RawNullablePointer", kind);
// Fieldless C-like enum: just the discriminant, signed or unsigned.
504 CEnum { discr, signed, .. } => {
505 assert_eq!(operands.len(), 0);
506 if let mir::AggregateKind::Adt(adt_def, variant, _) = *kind {
507 let val = adt_def.variants[variant].disr_val.to_u64_unchecked();
508 let size = discr.size().bytes() as usize;
511 self.memory.write_int(dest, val as i64, size)?;
513 self.memory.write_uint(dest, val, size)?;
516 panic!("tried to assign {:?} to Layout::CEnum", kind);
520 _ => return Err(EvalError::Unimplemented(format!("can't handle destination layout {:?} when assigning {:?}", dest_layout, kind))),
// `[x; N]`: evaluate the operand once, then copy it into each slot.
524 Repeat(ref operand, _) => {
525 let (elem_size, elem_align, length) = match dest_ty.sty {
526 ty::TyArray(elem_ty, n) => (self.type_size(elem_ty), self.type_align(elem_ty), n),
527 _ => panic!("tried to assign array-repeat to non-array type {:?}", dest_ty),
530 let src = self.eval_operand(operand)?;
532 let elem_dest = dest.offset((i * elem_size) as isize);
533 self.memory.copy(src, elem_dest, elem_size, elem_align)?;
// Rvalue::Len: arrays know their length statically; slices carry it in
// the lvalue's `extra`.
538 let src = self.eval_lvalue(lvalue)?;
539 let ty = self.lvalue_ty(lvalue);
540 let len = match ty.sty {
541 ty::TyArray(_, n) => n as u64,
542 ty::TySlice(_) => if let LvalueExtra::Length(n) = src.extra {
545 panic!("Rvalue::Len of a slice given non-slice pointer: {:?}", src);
547 _ => panic!("Rvalue::Len expected array or slice, got {:?}", ty),
549 self.memory.write_usize(dest, len)?;
// Taking a reference: write the pointer; for unsized lvalues also write
// the length word, making a fat pointer.
552 Ref(_, _, ref lvalue) => {
553 let lv = self.eval_lvalue(lvalue)?;
554 self.memory.write_ptr(dest, lv.ptr)?;
556 LvalueExtra::None => {},
557 LvalueExtra::Length(len) => {
558 let len_ptr = dest.offset(self.memory.pointer_size() as isize);
559 self.memory.write_usize(len_ptr, len)?;
561 LvalueExtra::DowncastVariant(..) =>
562 panic!("attempted to take a reference to an enum downcast lvalue"),
// Box allocation: allocate space for the pointee and store its address.
567 let size = self.type_size(ty);
568 let align = self.type_align(ty);
569 let ptr = self.memory.allocate(size, align)?;
570 self.memory.write_ptr(dest, ptr)?;
573 Cast(kind, ref operand, dest_ty) => {
574 use rustc::mir::repr::CastKind::*;
// Unsizing cast (&[T; N] -> &[T]): copy the thin pointer, then append
// the statically-known length to form a fat pointer.
577 let src = self.eval_operand(operand)?;
578 let src_ty = self.operand_ty(operand);
579 self.move_(src, dest, src_ty)?;
580 let src_pointee_ty = pointee_type(src_ty).unwrap();
581 let dest_pointee_ty = pointee_type(dest_ty).unwrap();
583 match (&src_pointee_ty.sty, &dest_pointee_ty.sty) {
584 (&ty::TyArray(_, length), &ty::TySlice(_)) => {
585 let len_ptr = dest.offset(self.memory.pointer_size() as isize);
586 self.memory.write_usize(len_ptr, length as u64)?;
589 _ => return Err(EvalError::Unimplemented(format!("can't handle cast: {:?}", rvalue))),
// Misc cast: raw byte copy when sizes match (known-incorrect shortcut,
// see the FIXME below).
594 let src = self.eval_operand(operand)?;
595 let src_ty = self.operand_ty(operand);
596 // FIXME(solson): Wrong for almost everything.
597 warn!("misc cast from {:?} to {:?}", src_ty, dest_ty);
598 let dest_size = self.type_size(dest_ty);
599 let src_size = self.type_size(src_ty);
600 let dest_align = self.type_align(dest_ty);
602 // Hack to support fat pointer -> thin pointer casts to keep tests for
603 // other things passing for now.
604 let is_fat_ptr_cast = pointee_type(src_ty).map_or(false, |ty| !self.type_is_sized(ty));
606 if dest_size == src_size || is_fat_ptr_cast {
607 self.memory.copy(src, dest, dest_size, dest_align)?;
609 return Err(EvalError::Unimplemented(format!("can't handle cast: {:?}", rvalue)));
// fn item -> fn pointer: materialize a function pointer allocation.
613 ReifyFnPointer => match self.operand_ty(operand).sty {
614 ty::TyFnDef(def_id, substs, fn_ty) => {
615 let fn_ptr = self.memory.create_fn_ptr(def_id, substs, fn_ty);
616 self.memory.write_ptr(dest, fn_ptr)?;
618 ref other => panic!("reify fn pointer on {:?}", other),
// safe fn pointer -> unsafe fn pointer: re-wrap the same function
// definition with the unsafe signature.
621 UnsafeFnPointer => match dest_ty.sty {
622 ty::TyFnPtr(unsafe_fn_ty) => {
623 let src = self.eval_operand(operand)?;
624 let ptr = self.memory.read_ptr(src)?;
625 let fn_def = self.memory.get_fn(ptr.alloc_id)?;
626 let fn_ptr = self.memory.create_fn_ptr(fn_def.def_id, fn_def.substs, unsafe_fn_ty);
627 self.memory.write_ptr(dest, fn_ptr)?;
629 ref other => panic!("fn to unsafe fn cast on {:?}", other),
634 InlineAsm { .. } => unimplemented!(),
/// For a `StructWrappedNullablePointer` enum, computes the byte offset of
/// the discriminant-carrying field: the first `discrfield` index selects a
/// field of the non-null variant, and the rest is walked by
/// `field_path_offset` through the field's type.
640 fn nonnull_offset(&self, ty: Ty<'tcx>, nndiscr: u64, discrfield: &[u32]) -> EvalResult<'tcx, Size> {
641 // Skip the constant 0 at the start meant for LLVM GEP.
642 let mut path = discrfield.iter().skip(1).map(|&i| i as usize);
644 // Handle the field index for the outer non-null variant.
645 let inner_ty = match ty.sty {
646 ty::TyEnum(adt_def, substs) => {
647 let variant = &adt_def.variants[nndiscr as usize];
648 let index = path.next().unwrap();
649 let field = &variant.fields[index];
650 field.ty(self.tcx, substs)
653 "non-enum for StructWrappedNullablePointer: {}",
// Walk the remaining indices through the inner type's layout.
658 self.field_path_offset(inner_ty, path)
/// Accumulates the byte offset reached by following a chain of field
/// indices starting at `ty`, using `get_field_offset`/`get_field_ty` at
/// each step. Panics on checked-add overflow.
661 fn field_path_offset<I: Iterator<Item = usize>>(&self, mut ty: Ty<'tcx>, path: I) -> EvalResult<'tcx, Size> {
662 let mut offset = Size::from_bytes(0);
664 // Skip the initial 0 intended for LLVM GEP.
665 for field_index in path {
666 let field_offset = self.get_field_offset(ty, field_index)?;
667 ty = self.get_field_ty(ty, field_index)?;
668 offset = offset.checked_add(field_offset, &self.tcx.data_layout).unwrap();
/// Type of field `field_index` within `ty`. For pointer-like types only
/// index 0 is meaningful (the pointee); other cases are unimplemented.
/// NOTE(review): the `match` header, a `TyBox` arm and the `Ok(ty)` of the
/// pointer case appear to be elided in this excerpt.
674 fn get_field_ty(&self, ty: Ty<'tcx>, field_index: usize) -> EvalResult<'tcx, Ty<'tcx>> {
676 ty::TyStruct(adt_def, substs) => {
677 Ok(adt_def.struct_variant().fields[field_index].ty(self.tcx, substs))
680 ty::TyRef(_, ty::TypeAndMut { ty, .. }) |
681 ty::TyRawPtr(ty::TypeAndMut { ty, .. }) |
683 assert_eq!(field_index, 0);
686 _ => Err(EvalError::Unimplemented(format!("can't handle type: {:?}", ty))),
/// Byte offset of field `field_index` inside `ty`, derived from its layout.
/// NOTE(review): the `match layout` header and a likely `Univariant` arm
/// using `variant.field_offset(field_index)` are elided; the visible
/// `Univariant` arm only handles index 0.
690 fn get_field_offset(&self, ty: Ty<'tcx>, field_index: usize) -> EvalResult<'tcx, Size> {
691 let layout = self.type_layout(ty);
693 use rustc::ty::layout::Layout::*;
695 Univariant { .. } => {
696 assert_eq!(field_index, 0);
697 Ok(Size::from_bytes(0))
// Fat pointer: the extra (length/vtable) word sits one pointer past the
// data pointer, per layout::FAT_PTR_ADDR.
699 FatPointer { .. } => {
700 let bytes = layout::FAT_PTR_ADDR * self.memory.pointer_size();
701 Ok(Size::from_bytes(bytes as u64))
703 _ => Err(EvalError::Unimplemented(format!("can't handle type: {:?}, with layout: {:?}", ty, layout))),
/// Resolves a MIR operand to a pointer at its value: lvalue consumption
/// goes through `eval_lvalue`; literal constants are materialized via
/// `const_to_ptr`; items and promoteds are looked up in the `statics`
/// cache (which must have been populated beforehand).
/// NOTE(review): the outer `match` headers and some struct-literal field
/// lines (`def_id`, `substs` of the Item case) are elided in this excerpt.
707 fn eval_operand(&mut self, op: &mir::Operand<'tcx>) -> EvalResult<'tcx, Pointer> {
708 use rustc::mir::repr::Operand::*;
710 Consume(ref lvalue) => Ok(self.eval_lvalue(lvalue)?.to_ptr()),
711 Constant(mir::Constant { ref literal, ty, .. }) => {
712 use rustc::mir::repr::Literal::*;
714 Value { ref value } => Ok(self.const_to_ptr(value)?),
715 Item { def_id, substs } => {
716 if let ty::TyFnDef(..) = ty.sty {
717 // function items are zero sized
718 Ok(self.memory.allocate(0, 0)?)
720 let cid = ConstantId {
723 kind: ConstantKind::Global,
725 Ok(*self.statics.get(&cid).expect("static should have been cached (rvalue)"))
// Promoteds are keyed by the enclosing function and its substs.
728 Promoted { index } => {
729 let cid = ConstantId {
730 def_id: self.frame().def_id,
731 substs: self.substs(),
732 kind: ConstantKind::Promoted(index),
734 Ok(*self.statics.get(&cid).expect("a promoted constant hasn't been precomputed"))
/// Resolves a MIR lvalue to a pointer (plus fat-pointer/downcast `extra`
/// info). Locals index into the current frame's `locals` vector using the
/// [args..., vars..., temps...] ordering set up by `push_stack_frame`.
/// NOTE(review): several match headers, arm closers and the `Static`/
/// `Field`/`Deref` projection headers are elided in this excerpt.
741 fn eval_lvalue(&mut self, lvalue: &mir::Lvalue<'tcx>) -> EvalResult<'tcx, Lvalue> {
742 use rustc::mir::repr::Lvalue::*;
743 let ptr = match *lvalue {
744 ReturnPointer => self.frame().return_ptr
745 .expect("ReturnPointer used in a function with no return value"),
746 Arg(i) => self.frame().locals[i.index()],
747 Var(i) => self.frame().locals[self.frame().var_offset + i.index()],
748 Temp(i) => self.frame().locals[self.frame().temp_offset + i.index()],
// Static lvalue: must already be in the `statics` cache.
751 let substs = subst::Substs::empty(self.tcx);
752 let cid = ConstantId {
755 kind: ConstantKind::Global,
757 *self.statics.get(&cid).expect("static should have been cached (lvalue)")
760 Projection(ref proj) => {
761 let base = self.eval_lvalue(&proj.base)?;
762 let base_ty = self.lvalue_ty(&proj.base);
763 let base_layout = self.type_layout(base_ty);
765 use rustc::mir::repr::ProjectionElem::*;
// Field projection: pick the variant's struct layout, then offset by
// the field's position within it.
768 use rustc::ty::layout::Layout::*;
769 let variant = match *base_layout {
770 Univariant { ref variant, .. } => variant,
771 General { ref variants, .. } => {
772 if let LvalueExtra::DowncastVariant(variant_idx) = base.extra {
773 &variants[variant_idx]
775 panic!("field access on enum had no variant index");
778 RawNullablePointer { .. } => {
779 assert_eq!(field.index(), 0);
782 StructWrappedNullablePointer { ref nonnull, .. } => nonnull,
783 _ => panic!("field access on non-product type: {:?}", base_layout),
786 let offset = variant.field_offset(field.index()).bytes();
787 base.ptr.offset(offset as isize)
// Downcast: skip past the discriminant and remember which variant the
// resulting lvalue refers to.
790 Downcast(_, variant) => {
791 use rustc::ty::layout::Layout::*;
793 General { discr, .. } => {
795 ptr: base.ptr.offset(discr.size().bytes() as isize),
796 extra: LvalueExtra::DowncastVariant(variant),
799 RawNullablePointer { .. } | StructWrappedNullablePointer { .. } => {
802 _ => panic!("variant downcast on non-aggregate: {:?}", base_layout),
// Deref: load the pointer; for slices/str also load the length word
// into `extra` so the result is a fat lvalue.
807 let pointee_ty = pointee_type(base_ty).expect("Deref of non-pointer");
808 let ptr = self.memory.read_ptr(base.ptr)?;
809 let extra = match pointee_ty.sty {
810 ty::TySlice(_) | ty::TyStr => {
811 let len_ptr = base.ptr.offset(self.memory.pointer_size() as isize);
812 let len = self.memory.read_usize(len_ptr)?;
813 LvalueExtra::Length(len)
815 ty::TyTrait(_) => unimplemented!(),
816 _ => LvalueExtra::None,
818 return Ok(Lvalue { ptr: ptr, extra: extra });
// Index: runtime index scaled by the element size.
821 Index(ref operand) => {
822 let elem_size = match base_ty.sty {
823 ty::TyArray(elem_ty, _) |
824 ty::TySlice(elem_ty) => self.type_size(elem_ty),
825 _ => panic!("indexing expected an array or slice, got {:?}", base_ty),
827 let n_ptr = self.eval_operand(operand)?;
828 let n = self.memory.read_usize(n_ptr)?;
829 base.ptr.offset(n as isize * elem_size as isize)
832 ConstantIndex { .. } => unimplemented!(),
833 Subslice { .. } => unimplemented!(),
838 Ok(Lvalue { ptr: ptr, extra: LvalueExtra::None })
/// Fully monomorphized type of an lvalue in the current frame.
841 fn lvalue_ty(&self, lvalue: &mir::Lvalue<'tcx>) -> Ty<'tcx> {
842 self.monomorphize(lvalue.ty(&self.mir(), self.tcx).to_ty(self.tcx), self.substs())
/// Fully monomorphized type of an operand in the current frame.
845 fn operand_ty(&self, operand: &mir::Operand<'tcx>) -> Ty<'tcx> {
846 self.monomorphize(operand.ty(&self.mir(), self.tcx), self.substs())
/// Moves a value of type `ty` by copying `size_of(ty)` bytes from `src`
/// to `dest` (a bitwise copy; no drop/ownership tracking here).
849 fn move_(&mut self, src: Pointer, dest: Pointer, ty: Ty<'tcx>) -> EvalResult<'tcx, ()> {
850 let size = self.type_size(ty);
851 let align = self.type_align(ty);
852 self.memory.copy(src, dest, size, align)?;
/// Loads a primitive value of type `ty` from `ptr`. Matches on
/// `(pointer_size, type)` so that `isize`/`usize` share arms with the
/// fixed-width integer of the same width on the target.
/// NOTE(review): the trailing `Ok(val)` and some closing braces are
/// elided in this excerpt.
856 pub fn read_primval(&mut self, ptr: Pointer, ty: Ty<'tcx>) -> EvalResult<'tcx, PrimVal> {
857 use syntax::ast::{IntTy, UintTy, FloatTy};
858 let val = match (self.memory.pointer_size(), &ty.sty) {
859 (_, &ty::TyBool) => PrimVal::Bool(self.memory.read_bool(ptr)?),
// Char is stored as a 4-byte scalar; reject invalid code points.
860 (_, &ty::TyChar) => {
861 let c = self.memory.read_uint(ptr, 4)? as u32;
862 match ::std::char::from_u32(c) {
863 Some(ch) => PrimVal::Char(ch),
864 None => return Err(EvalError::InvalidChar(c)),
867 (_, &ty::TyInt(IntTy::I8)) => PrimVal::I8(self.memory.read_int(ptr, 1)? as i8),
868 (2, &ty::TyInt(IntTy::Is)) |
869 (_, &ty::TyInt(IntTy::I16)) => PrimVal::I16(self.memory.read_int(ptr, 2)? as i16),
870 (4, &ty::TyInt(IntTy::Is)) |
871 (_, &ty::TyInt(IntTy::I32)) => PrimVal::I32(self.memory.read_int(ptr, 4)? as i32),
872 (8, &ty::TyInt(IntTy::Is)) |
873 (_, &ty::TyInt(IntTy::I64)) => PrimVal::I64(self.memory.read_int(ptr, 8)? as i64),
874 (_, &ty::TyUint(UintTy::U8)) => PrimVal::U8(self.memory.read_uint(ptr, 1)? as u8),
875 (2, &ty::TyUint(UintTy::Us)) |
876 (_, &ty::TyUint(UintTy::U16)) => PrimVal::U16(self.memory.read_uint(ptr, 2)? as u16),
877 (4, &ty::TyUint(UintTy::Us)) |
878 (_, &ty::TyUint(UintTy::U32)) => PrimVal::U32(self.memory.read_uint(ptr, 4)? as u32),
879 (8, &ty::TyUint(UintTy::Us)) |
880 (_, &ty::TyUint(UintTy::U64)) => PrimVal::U64(self.memory.read_uint(ptr, 8)? as u64),
882 (_, &ty::TyFloat(FloatTy::F32)) => PrimVal::F32(self.memory.read_f32(ptr)?),
883 (_, &ty::TyFloat(FloatTy::F64)) => PrimVal::F64(self.memory.read_f64(ptr)?),
// Function items are zero-sized; reify a fresh fn pointer for them.
885 (_, &ty::TyFnDef(def_id, substs, fn_ty)) => {
886 PrimVal::FnPtr(self.memory.create_fn_ptr(def_id, substs, fn_ty))
888 (_, &ty::TyFnPtr(_)) => self.memory.read_ptr(ptr).map(PrimVal::FnPtr)?,
889 (_, &ty::TyRef(_, ty::TypeAndMut { ty, .. })) |
890 (_, &ty::TyRawPtr(ty::TypeAndMut { ty, .. })) => {
891 if self.type_is_sized(ty) {
// A raw pointer slot may hold either an abstract allocation pointer or
// a plain integer address; fall back to the integer reading on
// ReadBytesAsPointer.
892 match self.memory.read_ptr(ptr) {
893 Ok(p) => PrimVal::AbstractPtr(p),
894 Err(EvalError::ReadBytesAsPointer) => {
895 PrimVal::IntegerPtr(self.memory.read_usize(ptr)?)
897 Err(e) => return Err(e),
900 return Err(EvalError::Unimplemented(format!("unimplemented: primitive read of fat pointer type: {:?}", ty)));
904 _ => panic!("primitive read of non-primitive type: {:?}", ty),
/// The innermost (currently executing) stack frame. Panics on an empty stack.
909 fn frame(&self) -> &Frame<'a, 'tcx> {
910 self.stack.last().expect("no call frames exist")
/// Mutable access to the innermost stack frame. Panics on an empty stack.
913 pub fn frame_mut(&mut self) -> &mut Frame<'a, 'tcx> {
914 self.stack.last_mut().expect("no call frames exist")
/// The MIR being executed by the current frame (cheap clone of the cache handle).
917 fn mir(&self) -> CachedMir<'a, 'tcx> {
918 self.frame().mir.clone()
/// The current frame's type substitutions (body elided in this excerpt).
921 fn substs(&self) -> &'tcx Substs<'tcx> {
/// Extracts the pointee type from a reference or raw pointer type.
/// NOTE(review): the match header, a probable `TyBox` arm, the `Some(ty)`
/// result and the `None` fallback are elided in this excerpt.
926 fn pointee_type(ptr_ty: ty::Ty) -> Option<ty::Ty> {
928 ty::TyRef(_, ty::TypeAndMut { ty, .. }) |
929 ty::TyRawPtr(ty::TypeAndMut { ty, .. }) |
/// Collapses an `Lvalue` to its bare pointer, asserting it carries no
/// fat-pointer/downcast `extra` data (the `self.ptr` return is elided in
/// this excerpt).
938 fn to_ptr(self) -> Pointer {
939 assert_eq!(self.extra, LvalueExtra::None);
/// Lets both cached-MIR variants be used directly as `&mir::Mir` (the
/// `match` header is elided in this excerpt).
944 impl<'mir, 'tcx: 'mir> Deref for CachedMir<'mir, 'tcx> {
945 type Target = mir::Mir<'tcx>;
946 fn deref(&self) -> &mir::Mir<'tcx> {
948 CachedMir::Ref(r) => r,
949 CachedMir::Owned(ref rc) => rc,
/// Interprets the program starting at `main`: sets up the context and the
/// first stack frame, fakes empty argc/argv when main takes two arguments,
/// then steps until completion or `step_limit` is exhausted.
/// NOTE(review): the `def_id`/limit parameters, the stepping-loop body and
/// some braces are elided in this excerpt.
954 pub fn eval_main<'a, 'tcx: 'a>(
955 tcx: TyCtxt<'a, 'tcx, 'tcx>,
956 mir_map: &'a MirMap<'tcx>,
962 let mir = mir_map.map.get(&def_id).expect("no mir for main function");
963 let mut ecx = EvalContext::new(tcx, mir_map, memory_size, stack_limit);
964 let substs = subst::Substs::empty(tcx);
965 let return_ptr = ecx.alloc_ret_ptr(mir.return_ty, substs)
966 .expect("should at least be able to allocate space for the main function's return value");
968 ecx.push_stack_frame(def_id, mir.span, CachedMir::Ref(mir), substs, Some(return_ptr))
969 .expect("could not allocate first stack frame");
// `fn main(argc, argv)`-style entry: supply zeroed argument cells.
971 if mir.arg_decls.len() == 2 {
973 let ptr_size = ecx.memory().pointer_size();
974 let nargs = ecx.memory_mut().allocate(ptr_size, ptr_size).expect("can't allocate memory for nargs");
975 ecx.memory_mut().write_usize(nargs, 0).unwrap();
976 let args = ecx.memory_mut().allocate(ptr_size, ptr_size).expect("can't allocate memory for arg pointer");
977 ecx.memory_mut().write_usize(args, 0).unwrap();
978 ecx.frame_mut().locals[0] = nargs;
979 ecx.frame_mut().locals[1] = args;
// Bounded interpretation loop; on error, report with a stack trace.
982 for _ in 0..step_limit {
986 // FIXME: diverging functions can end up here in some future miri
988 report(tcx, &ecx, e);
// Ran out of steps without terminating.
993 report(tcx, &ecx, EvalError::ExecutionTimeLimitReached);
/// Emits a compiler diagnostic for an interpreter error: points the main
/// message at the statement/terminator being executed, then attaches one
/// note per stack frame (innermost first) naming the enclosing call.
996 fn report(tcx: TyCtxt, ecx: &EvalContext, e: EvalError) {
997 let frame = ecx.stack().last().expect("stackframe was empty");
998 let block = &frame.mir.basic_blocks()[frame.block];
// `stmt` past the statement list means we were at the terminator.
999 let span = if frame.stmt < block.statements.len() {
1000 block.statements[frame.stmt].source_info.span
1002 block.terminator().source_info.span
1004 let mut err = tcx.sess.struct_span_err(span, &e.to_string());
1005 for &Frame { def_id, substs, span, .. } in ecx.stack().iter().rev() {
1006 // FIXME(solson): Find a way to do this without this Display impl hack.
1007 use rustc::util::ppaux;
// Local wrapper so `parameterized` can pretty-print "def_id<substs>".
1009 struct Instance<'tcx>(DefId, &'tcx subst::Substs<'tcx>);
1010 impl<'tcx> fmt::Display for Instance<'tcx> {
1011 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
1012 ppaux::parameterized(f, self.1, self.0, ppaux::Ns::Value, &[])
1015 err.span_note(span, &format!("inside call to {}", Instance(def_id, substs)));
/// Runs the MIR pass pipeline miri needs before interpretation: strip
/// landing pads, erase regions, elaborate drops, and simplify the CFG
/// after each destructive step; a final dump marker tags the result.
1020 pub fn run_mir_passes<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, mir_map: &mut MirMap<'tcx>) {
1021 let mut passes = ::rustc::mir::transform::Passes::new();
1022 passes.push_hook(Box::new(::rustc_mir::transform::dump_mir::DumpMir));
1023 passes.push_pass(Box::new(::rustc_mir::transform::no_landing_pads::NoLandingPads));
1024 passes.push_pass(Box::new(::rustc_mir::transform::simplify_cfg::SimplifyCfg::new("no-landing-pads")));
1026 passes.push_pass(Box::new(::rustc_mir::transform::erase_regions::EraseRegions));
1028 passes.push_pass(Box::new(::rustc_borrowck::ElaborateDrops));
1029 passes.push_pass(Box::new(::rustc_mir::transform::no_landing_pads::NoLandingPads));
1030 passes.push_pass(Box::new(::rustc_mir::transform::simplify_cfg::SimplifyCfg::new("elaborate-drops")));
1031 passes.push_pass(Box::new(::rustc_mir::transform::dump_mir::Marker("PreMiri")));
1033 passes.run_passes(tcx, mir_map);
1036 // TODO(solson): Upstream these methods into rustc::ty::layout.
// Extension trait: byte size of a layout integer (trait header elided in
// this excerpt). Note I1 (bool-like) is rounded up to a full byte.
1039 fn size(self) -> Size;
1042 impl IntegerExt for layout::Integer {
1043 fn size(self) -> Size {
1044 use rustc::ty::layout::Integer::*;
1046 I1 | I8 => Size::from_bits(8),
1047 I16 => Size::from_bits(16),
1048 I32 => Size::from_bits(32),
1049 I64 => Size::from_bits(64),
// Extension trait: byte offset of field `index` in a struct layout (trait
// header elided in this excerpt).
1055 fn field_offset(&self, index: usize) -> Size;
1058 impl StructExt for layout::Struct {
1059 fn field_offset(&self, index: usize) -> Size {
// For index > 0 the offset is the end of the previous field; the
// index == 0 branch (offset zero) is elided in this excerpt.
1063 self.offset_after_field[index - 1]