1 use rustc::middle::const_val;
2 use rustc::hir::def_id::DefId;
3 use rustc::mir::mir_map::MirMap;
4 use rustc::mir::repr as mir;
5 use rustc::traits::{self, ProjectionMode};
6 use rustc::ty::fold::TypeFoldable;
7 use rustc::ty::layout::{self, Layout, Size};
8 use rustc::ty::subst::{self, Subst, Substs};
9 use rustc::ty::{self, Ty, TyCtxt};
10 use rustc::util::nodemap::DefIdMap;
11 use std::cell::RefCell;
12 use std::ops::{Deref, DerefMut};
17 use syntax::codemap::{self, DUMMY_SP, Span};
19 use error::{EvalError, EvalResult};
20 use memory::{Memory, Pointer};
21 use primval::{self, PrimVal};
23 use std::collections::HashMap;
// Interpreter-wide state shared by every function invocation: the type
// context, sources of MIR, the simulated memory, and precomputed constants.
// NOTE(review): this listing is elided — the declaration of the `memory`
// field (documented at "The virtual memory system") and the closing brace
// are missing from this view.
27 struct GlobalEvalContext<'a, 'tcx: 'a> {
28 /// The results of the type checker, from rustc.
29 tcx: TyCtxt<'a, 'tcx, 'tcx>,
31 /// A mapping from NodeIds to Mir, from rustc. Only contains MIR for crate-local items.
32 mir_map: &'a MirMap<'tcx>,
34 /// A local cache from DefIds to Mir for non-crate-local items.
35 mir_cache: RefCell<DefIdMap<Rc<mir::Mir<'tcx>>>>,
37 /// The virtual memory system.
40 /// Precomputed statics, constants and promoteds
41 statics: HashMap<ConstantId<'tcx>, Pointer>,
// Per-call-tree evaluation context: holds the virtual call stack and a
// mutable borrow of the shared GlobalEvalContext. The Deref/DerefMut impls
// below forward field/method access to `gecx`.
44 struct FnEvalContext<'a, 'b: 'a + 'mir, 'mir, 'tcx: 'b> {
45 gecx: &'a mut GlobalEvalContext<'b, 'tcx>,
47 /// The virtual call stack.
48 stack: Vec<Frame<'mir, 'tcx>>,
// Deref so `FnEvalContext` methods can say e.g. `self.tcx` / `self.memory`
// and reach the global context transparently.
// NOTE(review): the method body is elided in this view — presumably it
// returns `self.gecx`; confirm against the full source.
51 impl<'a, 'b, 'mir, 'tcx> Deref for FnEvalContext<'a, 'b, 'mir, 'tcx> {
52 type Target = GlobalEvalContext<'b, 'tcx>;
53 fn deref(&self) -> &Self::Target {
// Mutable counterpart of the Deref impl above.
// NOTE(review): the method body is elided in this view — presumably it
// returns `self.gecx`; confirm against the full source.
58 impl<'a, 'b, 'mir, 'tcx> DerefMut for FnEvalContext<'a, 'b, 'mir, 'tcx> {
59 fn deref_mut(&mut self) -> &mut Self::Target {
// One entry of the virtual call stack (see FnEvalContext::stack).
// NOTE(review): several field declarations are elided in this view
// (e.g. `def_id`, the call-site span, `locals`, `var_offset`,
// `temp_offset`, `stmt`) — only their doc comments remain.
65 struct Frame<'a, 'tcx: 'a> {
66 /// The def_id of the current function
69 /// The span of the call site
72 /// type substitutions for the current function invocation
73 substs: &'tcx Substs<'tcx>,
75 /// The MIR for the function called on this frame.
76 mir: CachedMir<'a, 'tcx>,
78 /// The block that is currently executed (or will be executed after the above call stacks return)
79 next_block: mir::BasicBlock,
81 /// A pointer for writing the return value of the current call if it's not a diverging call.
82 return_ptr: Option<Pointer>,
84 /// The list of locals for the current function, stored in order as
85 /// `[arguments..., variables..., temporaries...]`. The variables begin at `self.var_offset`
86 /// and the temporaries at `self.temp_offset`.
89 /// The offset of the first variable in `self.locals`.
92 /// The offset of the first temporary in `self.locals`.
95 /// The index of the currently evaluated statment
98 // Constants that need to be evaluated before the next statement can be evaluated
99 constants: Vec<(ConstantId<'tcx>, Span, Pointer, CachedMir<'a, 'tcx>)>,
// NOTE(review): fragments of two type definitions whose headers are elided
// in this view. Judging by usage in eval_assignment (Lvalue/LvalueExtra
// matches on `src.extra`), the second is the `LvalueExtra` enum, of which
// `DowncastVariant(usize)` is a variant — confirm against the full source.
102 #[derive(Copy, Clone, Debug, Eq, PartialEq)]
108 #[derive(Copy, Clone, Debug, Eq, PartialEq)]
112 // TODO(solson): Vtable(memory::AllocId),
113 DowncastVariant(usize),
// MIR for a frame: either borrowed from the crate-local `mir_map`
// or shared out of the `mir_cache` (non-crate-local items) via Rc.
117 enum CachedMir<'mir, 'tcx: 'mir> {
118 Ref(&'mir mir::Mir<'tcx>),
119 Owned(Rc<mir::Mir<'tcx>>)
// NOTE(review): the variant identifiers themselves are elided in this view;
// usages elsewhere in the file show them as Block, Call, and Return.
122 /// Represents the action to be taken in the main loop as a result of executing a terminator.
123 enum TerminatorTarget {
124 /// Make a local jump to the next block
127 /// Start executing from the new current frame. (For function calls.)
130 /// Stop executing the current frame and resume the previous frame.
// Key used to cache evaluated statics/constants/promoteds in
// `GlobalEvalContext::statics`. Hash/Eq so it can be a HashMap key.
134 #[derive(Clone, Debug, Eq, PartialEq, Hash)]
135 enum ConstantId<'tcx> {
// A promoted rvalue within the function identified by `def_id`.
136 Promoted { def_id: DefId, substs: &'tcx Substs<'tcx>, index: usize },
// A `static` (or constant) item.
137 Static { def_id: DefId, substs: &'tcx Substs<'tcx> },
// Accessors shared by both ConstantId variants.
// NOTE(review): the `match *self {` lines and closing braces are elided
// in this view; only the arms are visible.
140 impl<'tcx> ConstantId<'tcx> {
// Returns the substitutions under which the constant is evaluated.
141 fn substs(&self) -> &'tcx Substs<'tcx> {
142 use self::ConstantId::*;
144 Promoted { substs, .. } |
145 Static { substs, .. } => substs
// Returns the item (or enclosing function, for promoteds) the id refers to.
149 fn def_id(&self) -> DefId {
150 use self::ConstantId::*;
152 Promoted { def_id, .. } |
153 Static { def_id, .. } => def_id,
159 impl<'a, 'tcx> GlobalEvalContext<'a, 'tcx> {
// Builds a fresh global context with empty caches. The virtual memory's
// pointer size is derived from the target's uint type, converted from
// bits to bytes (`/8`). NOTE(review): the struct-literal lines for `tcx`,
// `mir_map` and the session accessor chain are elided in this view.
160 fn new(tcx: TyCtxt<'a, 'tcx, 'tcx>, mir_map: &'a MirMap<'tcx>) -> Self {
164 mir_cache: RefCell::new(DefIdMap()),
165 memory: Memory::new(tcx.sess
169 .expect("Session::target::uint_type was usize")/8),
170 statics: HashMap::new(),
// Entry point: evaluates `mir` (the body of `def_id`) with empty substs,
// returning a pointer to the return value if the function converges.
// NOTE(review): the lines that run the nested context and produce the
// final result are elided in this view.
174 fn call(&mut self, mir: &mir::Mir<'tcx>, def_id: DefId) -> EvalResult<Option<Pointer>> {
175 let substs = self.tcx.mk_substs(subst::Substs::empty());
176 let return_ptr = self.alloc_ret_ptr(mir.return_ty, substs);
178 let mut nested_fecx = FnEvalContext::new(self);
180 nested_fecx.push_stack_frame(def_id, mir.span, CachedMir::Ref(mir), substs, None);
// The frame was pushed with return_ptr=None; patch it in afterwards.
182 nested_fecx.frame_mut().return_ptr = return_ptr;
// Allocates space for a function's return value, or None for diverging
// functions (ty::FnDiverging). NOTE(review): the `match ty {` line is
// elided; also note this calls a two-argument `type_size(ty, substs)`
// while other methods call a one-argument form — presumably different
// helpers; confirm against the full source.
188 fn alloc_ret_ptr(&mut self, ty: ty::FnOutput<'tcx>, substs: &'tcx Substs<'tcx>) -> Option<Pointer> {
190 ty::FnConverging(ty) => {
191 let size = self.type_size(ty, substs);
192 Some(self.memory.allocate(size))
194 ty::FnDiverging => None,
197 // TODO(solson): Try making const_to_primval instead.
// Materializes a rustc ConstVal into freshly-allocated interpreter memory
// and returns a pointer to it. Strings and byte strings get two
// allocations: one for the data and one for the (fat) pointer to it.
// NOTE(review): several arm headers (`Integral`, `Str`, `ByteStr`, `Bool`)
// and the `Ok(ptr)` returns are elided in this view.
198 fn const_to_ptr(&mut self, const_val: &const_val::ConstVal) -> EvalResult<Pointer> {
199 use rustc::middle::const_val::ConstVal::*;
201 Float(_f) => unimplemented!(),
203 // TODO(solson): Check int constant type.
// Integers are stored as 8 bytes regardless of their actual type.
204 let ptr = self.memory.allocate(8);
205 self.memory.write_uint(ptr, int.to_u64_unchecked(), 8)?;
// String constant: data allocation + (ptr, len) fat pointer.
209 let psize = self.memory.pointer_size;
210 let static_ptr = self.memory.allocate(s.len());
211 let ptr = self.memory.allocate(psize * 2);
212 self.memory.write_bytes(static_ptr, s.as_bytes())?;
213 self.memory.write_ptr(ptr, static_ptr)?;
214 self.memory.write_usize(ptr.offset(psize as isize), s.len() as u64)?;
// Byte-string constant: data allocation + thin pointer.
218 let psize = self.memory.pointer_size;
219 let static_ptr = self.memory.allocate(bs.len());
220 let ptr = self.memory.allocate(psize);
221 self.memory.write_bytes(static_ptr, bs)?;
222 self.memory.write_ptr(ptr, static_ptr)?;
226 let ptr = self.memory.allocate(1);
227 self.memory.write_bool(ptr, b)?;
230 Char(_c) => unimplemented!(),
231 Struct(_node_id) => unimplemented!(),
232 Tuple(_node_id) => unimplemented!(),
233 Function(_def_id) => unimplemented!(),
234 Array(_, _) => unimplemented!(),
235 Repeat(_, _) => unimplemented!(),
236 Dummy => unimplemented!(),
// Whether dropping a value of `ty` runs any glue, queried in an empty
// parameter environment (used by `drop` to skip no-op drops).
240 fn type_needs_drop(&self, ty: Ty<'tcx>) -> bool {
241 self.tcx.type_needs_drop_given_env(ty, &self.tcx.empty_parameter_environment())
// Whether `ty` is Sized, queried in an empty parameter environment
// (used e.g. by the size_of_val intrinsic to pick the fat-pointer path).
244 fn type_is_sized(&self, ty: Ty<'tcx>) -> bool {
245 ty.is_sized(self.tcx, &self.tcx.empty_parameter_environment(), DUMMY_SP)
// Resolves a trait reference to a concrete vtable via trait selection,
// then drives a fulfillment context so nested obligations can inform
// inference of the impl's type parameters. Panics (via unwrap /
// drain_fulfillment_cx_or_panic) if selection fails — the interpreter
// assumes the program already type-checked.
248 fn fulfill_obligation(&self, trait_ref: ty::PolyTraitRef<'tcx>) -> traits::Vtable<'tcx, ()> {
249 // Do the initial selection for the obligation. This yields the shallow result we are
250 // looking for -- that is, what specific impl.
251 self.tcx.normalizing_infer_ctxt(ProjectionMode::Any).enter(|infcx| {
252 let mut selcx = traits::SelectionContext::new(&infcx);
254 let obligation = traits::Obligation::new(
255 traits::ObligationCause::misc(DUMMY_SP, ast::DUMMY_NODE_ID),
256 trait_ref.to_poly_trait_predicate(),
258 let selection = selcx.select(&obligation).unwrap().unwrap();
260 // Currently, we use a fulfillment context to completely resolve all nested obligations.
261 // This is because they can inform the inference of the impl's type parameters.
262 let mut fulfill_cx = traits::FulfillmentContext::new();
263 let vtable = selection.map(|predicate| {
264 fulfill_cx.register_predicate_obligation(&infcx, predicate);
266 infcx.drain_fulfillment_cx_or_panic(DUMMY_SP, &mut fulfill_cx, &vtable)
270 /// Trait method, which has to be resolved to an impl method.
// Given a trait method's def_id plus substs (which include Self), selects
// the winning impl via fulfill_obligation and returns the impl method's
// def_id together with the combined impl+method substitutions.
// NOTE(review): the fn signature's first lines and the `fn pointer` /
// `object` arm bodies are elided in this view — those arms appear to be
// unimplemented, with the trans-derived code left commented out.
274 substs: &'tcx Substs<'tcx>
275 ) -> (DefId, &'tcx Substs<'tcx>) {
276 let method_item = self.tcx.impl_or_trait_item(def_id);
277 let trait_id = method_item.container().id();
278 let trait_ref = ty::Binder(substs.to_trait_ref(self.tcx, trait_id));
279 match self.fulfill_obligation(trait_ref) {
280 traits::VtableImpl(vtable_impl) => {
281 let impl_did = vtable_impl.impl_def_id;
282 let mname = self.tcx.item_name(def_id);
283 // Create a concatenated set of substitutions which includes those from the impl
284 // and those from the method:
285 let impl_substs = vtable_impl.substs.with_method_from(substs);
286 let substs = self.tcx.mk_substs(impl_substs);
287 let mth = get_impl_method(self.tcx, impl_did, substs, mname);
289 (mth.method.def_id, mth.substs)
292 traits::VtableClosure(vtable_closure) =>
293 (vtable_closure.closure_def_id, vtable_closure.substs.func_substs),
295 traits::VtableFnPointer(_fn_ty) => {
296 let _trait_closure_kind = self.tcx.lang_items.fn_trait_kind(trait_id).unwrap();
298 // let llfn = trans_fn_pointer_shim(ccx, trait_closure_kind, fn_ty);
300 // let method_ty = def_ty(tcx, def_id, substs);
301 // let fn_ptr_ty = match method_ty.sty {
302 // ty::TyFnDef(_, _, fty) => tcx.mk_ty(ty::TyFnPtr(fty)),
303 // _ => unreachable!("expected fn item type, found {}",
306 // Callee::ptr(immediate_rvalue(llfn, fn_ptr_ty))
309 traits::VtableObject(ref _data) => {
312 // data: Virtual(traits::get_vtable_index_of_object_method(
313 // tcx, data, def_id)),
314 // ty: def_ty(tcx, def_id, substs)
317 vtable => unreachable!("resolved vtable bad vtable {:?} in trans", vtable),
322 impl<'a, 'b, 'mir, 'tcx> FnEvalContext<'a, 'b, 'mir, 'tcx> {
// Creates a call context over the shared global state with an empty stack.
// NOTE(review): the constructor body is elided in this view.
323 fn new(gecx: &'a mut GlobalEvalContext<'b, 'tcx>) -> Self {
// Reports an EvalError as a rustc diagnostic, pointing at the span of the
// current statement (or the block terminator when the statement index is
// past the end), then annotating every frame of the virtual stack with an
// "inside call to …" note.
332 fn report(&self, e: &EvalError) {
333 let stmt = self.frame().stmt;
334 let block = self.basic_block();
335 let span = if stmt < block.statements.len() {
336 block.statements[stmt].span
338 block.terminator().span
340 let mut err = self.tcx.sess.struct_span_err(span, &e.to_string());
// Walk the stack innermost-first so the notes read like a backtrace.
341 for &Frame{ def_id, substs, span, .. } in self.stack.iter().rev() {
342 // FIXME(solson): Find a way to do this without this Display impl hack.
343 use rustc::util::ppaux;
// Local newtype purely so ppaux::parameterized can render
// "path::to::fn::<substs>" through a Display impl.
345 struct Instance<'tcx>(DefId, &'tcx Substs<'tcx>);
346 impl<'tcx> fmt::Display for Instance<'tcx> {
347 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
348 ppaux::parameterized(f, self.1, self.0, ppaux::Ns::Value, &[],
349 |tcx| tcx.lookup_item_type(self.0).generics)
352 err.span_note(span, &format!("inside call to {}", Instance(def_id, substs)));
// Passes `r` through unchanged, but emits a diagnostic via `report` first
// if it is an Err. NOTE(review): the body past the `if let` is elided.
357 fn maybe_report<T>(&self, r: EvalResult<T>) -> EvalResult<T> {
358 if let Err(ref e) = r {
// Main interpreter loop: repeatedly steps the Stepper, tracing each event,
// until it reports Done. Errors from `step` propagate out via `?`.
// NOTE(review): the loop construct and some event arms are elided in
// this view.
364 fn run(&mut self) -> EvalResult<()> {
365 let mut stepper = stepper::Stepper::new(self);
367 use self::stepper::Event::*;
368 trace!("// {:?}", stepper.block());
371 match stepper.step()? {
372 Constant => trace!("next statement requires the computation of a constant"),
373 Assignment => trace!("{:?}", stepper.stmt()),
375 trace!("{:?}", stepper.term().kind);
378 Done => return Ok(()),
// Pushes a new Frame for `def_id` and allocates memory for all of its
// locals, laid out as [arguments..., variables..., temporaries...] to
// match Frame::locals. The frame is pushed first (with its fields set
// from the MIR's decl counts), then the allocations are written back via
// frame_mut(), since `self` is mutably borrowed while allocating.
// NOTE(review): some struct-literal fields and closing lines are elided
// in this view.
384 fn push_stack_frame(&mut self, def_id: DefId, span: codemap::Span, mir: CachedMir<'mir, 'tcx>, substs: &'tcx Substs<'tcx>,
385 return_ptr: Option<Pointer>)
387 let arg_tys = mir.arg_decls.iter().map(|a| a.ty);
388 let var_tys = mir.var_decls.iter().map(|v| v.ty);
389 let temp_tys = mir.temp_decls.iter().map(|t| t.ty);
391 let num_args = mir.arg_decls.len();
392 let num_vars = mir.var_decls.len();
// Deepen trace indentation for the duration of this frame
// (undone in pop_stack_frame).
394 ::log_settings::settings().indentation += 1;
396 self.stack.push(Frame {
398 next_block: mir::START_BLOCK,
399 return_ptr: return_ptr,
401 var_offset: num_args,
402 temp_offset: num_args + num_vars,
407 constants: Vec::new(),
410 let locals: Vec<Pointer> = arg_tys.chain(var_tys).chain(temp_tys).map(|ty| {
411 let size = self.type_size(ty);
412 self.memory.allocate(size)
415 self.frame_mut().locals = locals;
// Pops the current frame, restoring trace indentation. Panics if the
// stack is empty — callers must only pop frames they pushed.
418 fn pop_stack_frame(&mut self) {
419 ::log_settings::settings().indentation -= 1;
420 let _frame = self.stack.pop().expect("tried to pop a stack frame, but there were none");
421 // TODO(solson): Deallocate local variables.
// Executes one MIR terminator and reports what the main loop should do
// next (jump to a block, enter a new frame, or return).
// NOTE(review): this listing is elided — several arm headers (e.g. Goto,
// the Abi::C arm header, `match func_ty.sty {`), closing braces, and a
// few statements are missing from this view.
424 fn eval_terminator(&mut self, terminator: &mir::Terminator<'tcx>)
425 -> EvalResult<TerminatorTarget> {
426 use rustc::mir::repr::TerminatorKind::*;
427 let target = match terminator.kind {
428 Return => TerminatorTarget::Return,
// (Goto-style arm, header elided): jump unconditionally.
431 self.frame_mut().next_block = target;
432 TerminatorTarget::Block
// Two-way branch on a bool read from memory.
435 If { ref cond, targets: (then_target, else_target) } => {
436 let cond_ptr = self.eval_operand(cond)?;
437 let cond_val = self.memory.read_bool(cond_ptr)?;
438 self.frame_mut().next_block = if cond_val { then_target } else { else_target };
439 TerminatorTarget::Block
// Integer switch: linear scan of the constant values.
442 SwitchInt { ref discr, ref values, ref targets, .. } => {
443 let discr_ptr = self.eval_lvalue(discr)?.to_ptr();
444 let discr_size = self
445 .type_layout(self.lvalue_ty(discr))
446 .size(&self.tcx.data_layout)
448 let discr_val = self.memory.read_uint(discr_ptr, discr_size)?;
450 // Branch to the `otherwise` case by default, if no match is found.
451 let mut target_block = targets[targets.len() - 1];
453 for (index, val_const) in values.iter().enumerate() {
454 let ptr = self.const_to_ptr(val_const)?;
455 let val = self.memory.read_uint(ptr, discr_size)?;
456 if discr_val == val {
457 target_block = targets[index];
462 self.frame_mut().next_block = target_block;
463 TerminatorTarget::Block
// Enum switch: match the stored discriminant against each variant.
466 Switch { ref discr, ref targets, adt_def } => {
467 let adt_ptr = self.eval_lvalue(discr)?.to_ptr();
468 let adt_ty = self.lvalue_ty(discr);
469 let discr_val = self.read_discriminant_value(adt_ptr, adt_ty)?;
470 let matching = adt_def.variants.iter()
471 .position(|v| discr_val == v.disr_val.to_u64_unchecked());
475 self.frame_mut().next_block = targets[i];
476 TerminatorTarget::Block
478 None => return Err(EvalError::InvalidDiscriminant),
// Function call: dispatch on ABI (intrinsic / C / Rust).
482 Call { ref func, ref args, ref destination, .. } => {
483 let mut return_ptr = None;
484 if let Some((ref lv, target)) = *destination {
485 self.frame_mut().next_block = target;
486 return_ptr = Some(self.eval_lvalue(lv)?.to_ptr());
489 let func_ty = self.operand_ty(func);
491 ty::TyFnDef(def_id, substs, fn_ty) => {
492 use syntax::abi::Abi;
494 Abi::RustIntrinsic => {
495 let name = self.tcx.item_name(def_id).as_str();
496 match fn_ty.sig.0.output {
497 ty::FnConverging(ty) => {
498 let size = self.type_size(ty);
499 let ret = return_ptr.unwrap();
500 self.call_intrinsic(&name, substs, args, ret, size)?
502 ty::FnDiverging => unimplemented!(),
// (Abi::C arm, header elided): route through call_c_abi.
507 match fn_ty.sig.0.output {
508 ty::FnConverging(ty) => {
509 let size = self.type_size(ty);
510 self.call_c_abi(def_id, args, return_ptr.unwrap(), size)?
512 ty::FnDiverging => unimplemented!(),
516 Abi::Rust | Abi::RustCall => {
517 // TODO(solson): Adjust the first argument when calling a Fn or
518 // FnMut closure via FnOnce::call_once.
520 // Only trait methods can have a Self parameter.
521 let (resolved_def_id, resolved_substs) = if substs.self_ty().is_some() {
522 self.trait_method(def_id, substs)
527 let mut arg_srcs = Vec::new();
529 let src = self.eval_operand(arg)?;
530 let src_ty = self.operand_ty(arg);
531 arg_srcs.push((src, src_ty));
// "rust-call" ABI: the last argument is a tuple that must be
// untupled into individual arguments for the callee.
534 if fn_ty.abi == Abi::RustCall && !args.is_empty() {
536 let last_arg = args.last().unwrap();
537 let last = self.eval_operand(last_arg)?;
538 let last_ty = self.operand_ty(last_arg);
539 let last_layout = self.type_layout(last_ty);
540 match (&last_ty.sty, last_layout) {
541 (&ty::TyTuple(fields),
542 &Layout::Univariant { ref variant, .. }) => {
543 let offsets = iter::once(0)
544 .chain(variant.offset_after_field.iter()
545 .map(|s| s.bytes()));
546 for (offset, ty) in offsets.zip(fields) {
547 let src = last.offset(offset as isize);
548 arg_srcs.push((src, ty));
551 ty => panic!("expected tuple as last argument in function with 'rust-call' ABI, got {:?}", ty),
555 let mir = self.load_mir(resolved_def_id);
556 self.push_stack_frame(def_id, terminator.span, mir, resolved_substs, return_ptr);
// Copy evaluated arguments into the new frame's locals
// (arguments occupy the first `len` local slots).
558 for (i, (src, src_ty)) in arg_srcs.into_iter().enumerate() {
559 let dest = self.frame().locals[i];
560 self.move_(src, dest, src_ty)?;
563 TerminatorTarget::Call
566 abi => return Err(EvalError::Unimplemented(format!("can't handle function with {:?} ABI", abi))),
570 _ => return Err(EvalError::Unimplemented(format!("can't handle callee of type {:?}", func_ty))),
574 Drop { ref value, target, .. } => {
575 let ptr = self.eval_lvalue(value)?.to_ptr();
576 let ty = self.lvalue_ty(value);
// NOTE(review): the call to self.drop(ptr, ty) is elided here.
578 self.frame_mut().next_block = target;
579 TerminatorTarget::Block
582 Resume => unimplemented!(),
// Recursively runs drop glue for the value at `ptr` of type `ty`, then
// fills the dropped bytes with the drop-fill pattern. Currently only Box
// contents are recursed into; user Drop impls are not called (TODO below).
// NOTE(review): the `match ty.sty {` line, early return, and several
// closing braces are elided in this view.
588 fn drop(&mut self, ptr: Pointer, ty: Ty<'tcx>) -> EvalResult<()> {
589 if !self.type_needs_drop(ty) {
590 debug!("no need to drop {:?}", ty);
593 trace!("-need to drop {:?}", ty);
595 // TODO(solson): Call user-defined Drop::drop impls.
598 ty::TyBox(contents_ty) => {
599 match self.memory.read_ptr(ptr) {
600 Ok(contents_ptr) => {
601 self.drop(contents_ptr, contents_ty)?;
602 trace!("-deallocating box");
603 self.memory.deallocate(contents_ptr)?;
// Not a pointer: tolerate an already-dropped box whose bytes are
// all POST_DROP_U8 (a double-drop-fill); anything else is an error.
605 Err(EvalError::ReadBytesAsPointer) => {
606 let size = self.memory.pointer_size;
607 let possible_drop_fill = self.memory.read_bytes(ptr, size)?;
608 if possible_drop_fill.iter().all(|&b| b == mem::POST_DROP_U8) {
611 return Err(EvalError::ReadBytesAsPointer);
614 Err(e) => return Err(e),
618 // TODO(solson): Implement drop for other relevant types (e.g. aggregates).
623 // FIXME(solson): Trait objects (with no static size) probably get filled, too.
624 let size = self.type_size(ty);
625 self.memory.drop_fill(ptr, size)?;
// Reads the discriminant of the enum value stored at `adt_ptr`, handling
// each layout the compiler can pick: explicit discriminant (General /
// CEnum), nullable-pointer optimizations (Raw / StructWrapped), and 0 for
// non-sum layouts. NOTE(review): the final arm body and `Ok(discr_val)`
// are elided in this view.
630 fn read_discriminant_value(&self, adt_ptr: Pointer, adt_ty: Ty<'tcx>) -> EvalResult<u64> {
631 use rustc::ty::layout::Layout::*;
632 let adt_layout = self.type_layout(adt_ty);
634 let discr_val = match *adt_layout {
635 General { discr, .. } | CEnum { discr, .. } => {
636 let discr_size = discr.size().bytes();
637 self.memory.read_uint(adt_ptr, discr_size as usize)?
// Option<&T>-style: null means the "other" variant.
640 RawNullablePointer { nndiscr, .. } => {
641 self.read_nonnull_discriminant_value(adt_ptr, nndiscr)?
// Struct-wrapped: locate the discriminating pointer field first.
644 StructWrappedNullablePointer { nndiscr, ref discrfield, .. } => {
645 let offset = self.nonnull_offset(adt_ty, nndiscr, discrfield)?;
646 let nonnull = adt_ptr.offset(offset.bytes() as isize);
647 self.read_nonnull_discriminant_value(nonnull, nndiscr)?
650 // The discriminant_value intrinsic returns 0 for non-sum types.
651 Array { .. } | FatPointer { .. } | Scalar { .. } | Univariant { .. } |
// For nullable-pointer enum layouts: a non-null pointer word at `ptr`
// selects variant `nndiscr`, zero selects the other variant (enums with
// exactly two variants, hence the 0/1 assert).
// NOTE(review): this listing is elided — the match is missing a line
// between 659 and 661 (presumably the `Ok(0) => false,` arm that makes
// the null case reachable); confirm against the full source.
658 fn read_nonnull_discriminant_value(&self, ptr: Pointer, nndiscr: u64) -> EvalResult<u64> {
659 let not_null = match self.memory.read_usize(ptr) {
661 Ok(_) | Err(EvalError::ReadPointerAsBytes) => true,
662 Err(e) => return Err(e),
664 assert!(nndiscr == 0 || nndiscr == 1);
665 Ok(if not_null { nndiscr } else { 1 - nndiscr })
// Emulates a Rust intrinsic by name, writing the result to `dest`
// (`dest_size` bytes). Returns TerminatorTarget::Call without pushing a
// frame, so the main loop proceeds as if the call returned.
// NOTE(review): this listing is elided — the fn name and first parameters,
// the `match name {` line, several arm headers ("forget", "min_align_of",
// "move_val_init", "offset", "size_of", "size_of_val", "transmute", …),
// and many closing braces are missing from this view.
671 substs: &'tcx Substs<'tcx>,
672 args: &[mir::Operand<'tcx>],
675 ) -> EvalResult<TerminatorTarget> {
// Evaluate all argument operands to pointers up front.
676 let args_res: EvalResult<Vec<Pointer>> = args.iter()
677 .map(|arg| self.eval_operand(arg))
679 let args = args_res?;
682 // FIXME(solson): Handle different integer types correctly.
// Writes (sum, overflowed-bool) as a pair at dest.
683 "add_with_overflow" => {
684 let ty = *substs.types.get(subst::FnSpace, 0);
685 let size = self.type_size(ty);
686 let left = self.memory.read_int(args[0], size)?;
687 let right = self.memory.read_int(args[1], size)?;
688 let (n, overflowed) = unsafe {
689 ::std::intrinsics::add_with_overflow::<i64>(left, right)
691 self.memory.write_int(dest, n, size)?;
692 self.memory.write_bool(dest.offset(size as isize), overflowed)?;
697 "copy_nonoverlapping" => {
698 let elem_ty = *substs.types.get(subst::FnSpace, 0);
699 let elem_size = self.type_size(elem_ty);
700 let src = self.memory.read_ptr(args[0])?;
701 let dest = self.memory.read_ptr(args[1])?;
702 let count = self.memory.read_isize(args[2])?;
703 self.memory.copy(src, dest, count as usize * elem_size)?;
706 "discriminant_value" => {
707 let ty = *substs.types.get(subst::FnSpace, 0);
708 let adt_ptr = self.memory.read_ptr(args[0])?;
709 let discr_val = self.read_discriminant_value(adt_ptr, ty)?;
710 self.memory.write_uint(dest, discr_val, dest_size)?;
// (arm header elided): marks the argument's bytes as dropped.
714 let arg_ty = *substs.types.get(subst::FnSpace, 0);
715 let arg_size = self.type_size(arg_ty);
716 self.memory.drop_fill(args[0], arg_size)?;
719 "init" => self.memory.write_repeat(dest, 0, dest_size)?,
// (arm header elided): writes the constant 1.
722 self.memory.write_int(dest, 1, dest_size)?;
// (arm header elided, "move_val_init"-style): write value through a ptr.
726 let ty = *substs.types.get(subst::FnSpace, 0);
727 let ptr = self.memory.read_ptr(args[0])?;
728 self.move_(args[1], ptr, ty)?;
731 // FIXME(solson): Handle different integer types correctly.
732 "mul_with_overflow" => {
733 let ty = *substs.types.get(subst::FnSpace, 0);
734 let size = self.type_size(ty);
735 let left = self.memory.read_int(args[0], size)?;
736 let right = self.memory.read_int(args[1], size)?;
737 let (n, overflowed) = unsafe {
738 ::std::intrinsics::mul_with_overflow::<i64>(left, right)
740 self.memory.write_int(dest, n, size)?;
741 self.memory.write_bool(dest.offset(size as isize), overflowed)?;
// (arm header elided, "offset"-style): pointer arithmetic; also supports
// integer "pointers" stored as raw addresses.
745 let pointee_ty = *substs.types.get(subst::FnSpace, 0);
746 let pointee_size = self.type_size(pointee_ty) as isize;
747 let ptr_arg = args[0];
748 let offset = self.memory.read_isize(args[1])?;
750 match self.memory.read_ptr(ptr_arg) {
752 let result_ptr = ptr.offset(offset as isize * pointee_size);
753 self.memory.write_ptr(dest, result_ptr)?;
755 Err(EvalError::ReadBytesAsPointer) => {
756 let addr = self.memory.read_isize(ptr_arg)?;
757 let result_addr = addr + offset * pointee_size as i64;
758 self.memory.write_isize(dest, result_addr)?;
760 Err(e) => return Err(e),
764 // FIXME(solson): Handle different integer types correctly. Use primvals?
765 "overflowing_sub" => {
766 let ty = *substs.types.get(subst::FnSpace, 0);
767 let size = self.type_size(ty);
768 let left = self.memory.read_int(args[0], size)?;
769 let right = self.memory.read_int(args[1], size)?;
770 let n = left.wrapping_sub(right);
771 self.memory.write_int(dest, n, size)?;
// (arm header elided, "size_of"-style): statically-known size.
775 let ty = *substs.types.get(subst::FnSpace, 0);
776 let size = self.type_size(ty) as u64;
777 self.memory.write_uint(dest, size, dest_size)?;
// (arm header elided, "size_of_val"-style): dynamic size; unsized slices
// and strs compute len * elem_size from the fat pointer's length word.
781 let ty = *substs.types.get(subst::FnSpace, 0);
782 if self.type_is_sized(ty) {
783 let size = self.type_size(ty) as u64;
784 self.memory.write_uint(dest, size, dest_size)?;
787 ty::TySlice(_) | ty::TyStr => {
788 let elem_ty = ty.sequence_element_type(self.tcx);
789 let elem_size = self.type_size(elem_ty) as u64;
790 let ptr_size = self.memory.pointer_size as isize;
791 let n = self.memory.read_usize(args[0].offset(ptr_size))?;
792 self.memory.write_uint(dest, n * elem_size, dest_size)?;
795 _ => return Err(EvalError::Unimplemented(format!("unimplemented: size_of_val::<{:?}>", ty))),
// (arm header elided, "transmute"-style): bitwise move to dest.
801 let ty = *substs.types.get(subst::FnSpace, 0);
802 self.move_(args[0], dest, ty)?;
804 "uninit" => self.memory.mark_definedness(dest, dest_size, false)?,
806 name => return Err(EvalError::Unimplemented(format!("unimplemented intrinsic: {}", name))),
809 // Since we pushed no stack frame, the main loop will act
810 // as if the call just completed and it's returning to the
812 Ok(TerminatorTarget::Call)
// Emulates a small set of C-ABI functions (allocator shims and memcmp) by
// their `#[link_name]` (falling back to the item name). Like
// call_intrinsic, returns TerminatorTarget::Call without pushing a frame.
// NOTE(review): this listing is elided — the fn name and first parameters,
// the "memcmp" arm header, the Ordering -> int mapping, and several
// closing braces are missing from this view.
818 args: &[mir::Operand<'tcx>],
821 ) -> EvalResult<TerminatorTarget> {
822 let name = self.tcx.item_name(def_id);
823 let attrs = self.tcx.get_attrs(def_id);
824 let link_name = match attr::first_attr_value_str_by_name(&attrs, "link_name") {
825 Some(ln) => ln.clone(),
826 None => name.as_str(),
829 let args_res: EvalResult<Vec<Pointer>> = args.iter()
830 .map(|arg| self.eval_operand(arg))
832 let args = args_res?;
834 match &link_name[..] {
835 "__rust_allocate" => {
836 let size = self.memory.read_usize(args[0])?;
837 let ptr = self.memory.allocate(size as usize);
838 self.memory.write_ptr(dest, ptr)?;
841 "__rust_reallocate" => {
842 let ptr = self.memory.read_ptr(args[0])?;
843 let size = self.memory.read_usize(args[2])?;
844 self.memory.reallocate(ptr, size as usize)?;
845 self.memory.write_ptr(dest, ptr)?;
// (memcmp-style arm, header elided): lexicographic byte comparison.
849 let left = self.memory.read_ptr(args[0])?;
850 let right = self.memory.read_ptr(args[1])?;
851 let n = self.memory.read_usize(args[2])? as usize;
854 let left_bytes = self.memory.read_bytes(left, n)?;
855 let right_bytes = self.memory.read_bytes(right, n)?;
857 use std::cmp::Ordering::*;
858 match left_bytes.cmp(right_bytes) {
865 self.memory.write_int(dest, result, dest_size)?;
868 _ => return Err(EvalError::Unimplemented(format!("can't call C ABI function: {}", link_name))),
871 // Since we pushed no stack frame, the main loop will act
872 // as if the call just completed and it's returning to the
874 Ok(TerminatorTarget::Call)
// Evaluates each operand and moves it to `dest + offset` for the paired
// byte offset — the shared worker for aggregate assignment.
// NOTE(review): the `dest` and `offsets` parameters and the final Ok(())
// are elided in this view.
877 fn assign_fields<I: IntoIterator<Item = u64>>(
881 operands: &[mir::Operand<'tcx>],
882 ) -> EvalResult<()> {
883 for (offset, operand) in offsets.into_iter().zip(operands) {
884 let src = self.eval_operand(operand)?;
885 let src_ty = self.operand_ty(operand);
886 let field_dest = dest.offset(offset as isize);
887 self.move_(src, field_dest, src_ty)?;
// Executes a MIR assignment `lvalue = rvalue`, dispatching on the rvalue
// kind and, for Aggregate, on the destination's computed layout.
// NOTE(review): this listing is elided — the `match *rvalue {`, several
// arm headers (Array layout, Box, Misc cast, …), else-branch lines, and
// closing braces are missing from this view.
892 fn eval_assignment(&mut self, lvalue: &mir::Lvalue<'tcx>, rvalue: &mir::Rvalue<'tcx>)
895 let dest = self.eval_lvalue(lvalue)?.to_ptr();
896 let dest_ty = self.lvalue_ty(lvalue);
897 let dest_layout = self.type_layout(dest_ty);
899 use rustc::mir::repr::Rvalue::*;
901 Use(ref operand) => {
902 let src = self.eval_operand(operand)?;
903 self.move_(src, dest, dest_ty)?;
906 BinaryOp(bin_op, ref left, ref right) => {
907 let left_ptr = self.eval_operand(left)?;
908 let left_ty = self.operand_ty(left);
909 let left_val = self.read_primval(left_ptr, left_ty)?;
911 let right_ptr = self.eval_operand(right)?;
912 let right_ty = self.operand_ty(right);
913 let right_val = self.read_primval(right_ptr, right_ty)?;
915 let val = primval::binary_op(bin_op, left_val, right_val)?;
916 self.memory.write_primval(dest, val)?;
919 UnaryOp(un_op, ref operand) => {
920 let ptr = self.eval_operand(operand)?;
921 let ty = self.operand_ty(operand);
922 let val = self.read_primval(ptr, ty)?;
923 self.memory.write_primval(dest, primval::unary_op(un_op, val)?)?;
// Aggregates: layout decides how fields and discriminants are written.
926 Aggregate(ref kind, ref operands) => {
927 use rustc::ty::layout::Layout::*;
929 Univariant { ref variant, .. } => {
930 let offsets = iter::once(0)
931 .chain(variant.offset_after_field.iter().map(|s| s.bytes()));
932 self.assign_fields(dest, offsets, operands)?;
// (Array-layout arm, header elided): equally-spaced elements.
936 let elem_size = match dest_ty.sty {
937 ty::TyArray(elem_ty, _) => self.type_size(elem_ty) as u64,
938 _ => panic!("tried to assign {:?} to non-array type {:?}",
941 let offsets = (0..).map(|i| i * elem_size);
942 self.assign_fields(dest, offsets, operands)?;
// Multi-variant enum: write the discriminant, then the fields.
945 General { discr, ref variants, .. } => {
946 if let mir::AggregateKind::Adt(adt_def, variant, _) = *kind {
947 let discr_val = adt_def.variants[variant].disr_val.to_u64_unchecked();
948 let discr_size = discr.size().bytes() as usize;
949 self.memory.write_uint(dest, discr_val, discr_size)?;
951 let offsets = variants[variant].offset_after_field.iter()
953 self.assign_fields(dest, offsets, operands)?;
955 panic!("tried to assign {:?} to Layout::General", kind);
// Option<&T>-style: non-null variant stores its payload directly,
// the other variant is represented by a null word.
959 RawNullablePointer { nndiscr, .. } => {
960 if let mir::AggregateKind::Adt(_, variant, _) = *kind {
961 if nndiscr == variant as u64 {
962 assert_eq!(operands.len(), 1);
963 let operand = &operands[0];
964 let src = self.eval_operand(operand)?;
965 let src_ty = self.operand_ty(operand);
966 self.move_(src, dest, src_ty)?;
968 assert_eq!(operands.len(), 0);
969 self.memory.write_isize(dest, 0)?;
972 panic!("tried to assign {:?} to Layout::RawNullablePointer", kind);
976 StructWrappedNullablePointer { nndiscr, ref nonnull, ref discrfield } => {
977 if let mir::AggregateKind::Adt(_, variant, _) = *kind {
978 if nndiscr == variant as u64 {
979 let offsets = iter::once(0)
980 .chain(nonnull.offset_after_field.iter().map(|s| s.bytes()));
981 try!(self.assign_fields(dest, offsets, operands));
983 assert_eq!(operands.len(), 0);
// Null out only the discriminating field within the struct.
984 let offset = self.nonnull_offset(dest_ty, nndiscr, discrfield)?;
985 let dest = dest.offset(offset.bytes() as isize);
986 try!(self.memory.write_isize(dest, 0));
989 panic!("tried to assign {:?} to Layout::RawNullablePointer", kind);
// C-like enum: only the discriminant is stored, signed or unsigned.
993 CEnum { discr, signed, .. } => {
994 assert_eq!(operands.len(), 0);
995 if let mir::AggregateKind::Adt(adt_def, variant, _) = *kind {
996 let val = adt_def.variants[variant].disr_val.to_u64_unchecked();
997 let size = discr.size().bytes() as usize;
1000 self.memory.write_int(dest, val as i64, size)?;
1002 self.memory.write_uint(dest, val, size)?;
1005 panic!("tried to assign {:?} to Layout::CEnum", kind);
1009 _ => return Err(EvalError::Unimplemented(format!("can't handle destination layout {:?} when assigning {:?}", dest_layout, kind))),
// [x; n]: evaluate the operand once and memcpy it n times.
1013 Repeat(ref operand, _) => {
1014 let (elem_size, length) = match dest_ty.sty {
1015 ty::TyArray(elem_ty, n) => (self.type_size(elem_ty), n),
1016 _ => panic!("tried to assign array-repeat to non-array type {:?}", dest_ty),
1019 let src = self.eval_operand(operand)?;
1020 for i in 0..length {
1021 let elem_dest = dest.offset((i * elem_size) as isize);
1022 self.memory.copy(src, elem_dest, elem_size)?;
// len(): constant for arrays, read from the lvalue extra for slices.
1026 Len(ref lvalue) => {
1027 let src = self.eval_lvalue(lvalue)?;
1028 let ty = self.lvalue_ty(lvalue);
1029 let len = match ty.sty {
1030 ty::TyArray(_, n) => n as u64,
1031 ty::TySlice(_) => if let LvalueExtra::Length(n) = src.extra {
1034 panic!("Rvalue::Len of a slice given non-slice pointer: {:?}", src);
1036 _ => panic!("Rvalue::Len expected array or slice, got {:?}", ty),
1038 self.memory.write_usize(dest, len)?;
// &lvalue: store the pointer, plus the length word for fat pointers.
1041 Ref(_, _, ref lvalue) => {
1042 let lv = self.eval_lvalue(lvalue)?;
1043 self.memory.write_ptr(dest, lv.ptr)?;
1045 LvalueExtra::None => {},
1046 LvalueExtra::Length(len) => {
1047 let len_ptr = dest.offset(self.memory.pointer_size as isize);
1048 self.memory.write_usize(len_ptr, len)?;
1050 LvalueExtra::DowncastVariant(..) =>
1051 panic!("attempted to take a reference to an enum downcast lvalue"),
// (Box arm, header elided): allocate and store the new pointer.
1056 let size = self.type_size(ty);
1057 let ptr = self.memory.allocate(size);
1058 self.memory.write_ptr(dest, ptr)?;
1061 Cast(kind, ref operand, dest_ty) => {
1062 let src = self.eval_operand(operand)?;
1063 let src_ty = self.operand_ty(operand);
1065 use rustc::mir::repr::CastKind::*;
// (Unsize arm, header elided): copy the thin ptr, then append the
// length when coercing &[T; n] -> &[T].
1068 self.move_(src, dest, src_ty)?;
1069 let src_pointee_ty = pointee_type(src_ty).unwrap();
1070 let dest_pointee_ty = pointee_type(dest_ty).unwrap();
1072 match (&src_pointee_ty.sty, &dest_pointee_ty.sty) {
1073 (&ty::TyArray(_, length), &ty::TySlice(_)) => {
1074 let len_ptr = dest.offset(self.memory.pointer_size as isize);
1075 self.memory.write_usize(len_ptr, length as u64)?;
1078 _ => return Err(EvalError::Unimplemented(format!("can't handle cast: {:?}", rvalue))),
1083 // FIXME(solson): Wrong for almost everything.
1084 let size = dest_layout.size(&self.tcx.data_layout).bytes() as usize;
1085 self.memory.copy(src, dest, size)?;
1088 _ => return Err(EvalError::Unimplemented(format!("can't handle cast: {:?}", rvalue))),
1092 Slice { .. } => unimplemented!(),
1093 InlineAsm { .. } => unimplemented!(),
// For a StructWrappedNullablePointer enum: computes the byte offset of
// the field whose null-ness encodes the discriminant. `discrfield` is the
// GEP-style path computed by layout (leading constant 0 skipped); the
// first real index selects the field within the non-null variant, the
// rest descend into nested structs via field_path_offset.
// NOTE(review): the bug!/panic! arm for non-enum types is partially
// elided in this view.
1099 fn nonnull_offset(&self, ty: Ty<'tcx>, nndiscr: u64, discrfield: &[u32]) -> EvalResult<Size> {
1100 // Skip the constant 0 at the start meant for LLVM GEP.
1101 let mut path = discrfield.iter().skip(1).map(|&i| i as usize);
1103 // Handle the field index for the outer non-null variant.
1104 let inner_ty = match ty.sty {
1105 ty::TyEnum(adt_def, substs) => {
1106 let variant = &adt_def.variants[nndiscr as usize];
1107 let index = path.next().unwrap();
1108 let field = &variant.fields[index];
1109 field.ty(self.tcx, substs)
1112 "non-enum for StructWrappedNullablePointer: {}",
1117 self.field_path_offset(inner_ty, path)
// Accumulates the byte offset of a nested field reached by following
// `path` (a sequence of field indices) from `ty`, descending one type
// level per index. NOTE(review): the final `Ok(offset)` is elided in
// this view.
1120 fn field_path_offset<I: Iterator<Item = usize>>(&self, mut ty: Ty<'tcx>, path: I) -> EvalResult<Size> {
1121 let mut offset = Size::from_bytes(0);
1123 // Skip the initial 0 intended for LLVM GEP.
1124 for field_index in path {
1125 let field_offset = self.get_field_offset(ty, field_index)?;
1126 ty = self.get_field_ty(ty, field_index)?;
// checked_add guards against overflow in the target's address space.
1127 offset = offset.checked_add(field_offset, &self.tcx.data_layout).unwrap();
// Returns the type of field `field_index` within `ty`. Structs index
// into their (single) variant; references, raw pointers, and (per the
// elided arm, presumably Box) are treated as having exactly one "field":
// their pointee. NOTE(review): the `match ty.sty {` line, the third
// pointer-like pattern, and the Ok(ty) return are elided in this view.
1133 fn get_field_ty(&self, ty: Ty<'tcx>, field_index: usize) -> EvalResult<Ty<'tcx>> {
1135 ty::TyStruct(adt_def, substs) => {
1136 Ok(adt_def.struct_variant().fields[field_index].ty(self.tcx, substs))
1139 ty::TyRef(_, ty::TypeAndMut { ty, .. }) |
1140 ty::TyRawPtr(ty::TypeAndMut { ty, .. }) |
1142 assert_eq!(field_index, 0);
1145 _ => Err(EvalError::Unimplemented(format!("can't handle type: {:?}", ty))),
// Returns the byte offset of field `field_index` within `ty`, based on
// its layout. Fat pointers use the layout constant FAT_PTR_ADDR scaled
// by the pointer size. NOTE(review): the `match *layout {` line and the
// Univariant arm's general (non-zero-index) handling are elided in this
// view — as shown it only asserts index 0 and returns offset 0.
1149 fn get_field_offset(&self, ty: Ty<'tcx>, field_index: usize) -> EvalResult<Size> {
1150 let layout = self.type_layout(ty);
1152 use rustc::ty::layout::Layout::*;
1154 Univariant { .. } => {
1155 assert_eq!(field_index, 0);
1156 Ok(Size::from_bytes(0))
1158 FatPointer { .. } => {
1159 let bytes = layout::FAT_PTR_ADDR * self.memory.pointer_size;
1160 Ok(Size::from_bytes(bytes as u64))
1162 _ => Err(EvalError::Unimplemented(format!("can't handle type: {:?}, with layout: {:?}", ty, layout))),
/// Evaluates a MIR operand, returning a pointer to its value in the
/// interpreter's virtual memory. Constants are materialized via
/// `const_to_ptr`; statics and promoteds must already be present in
/// the `statics` cache (precomputed elsewhere), otherwise this panics.
1166 fn eval_operand(&mut self, op: &mir::Operand<'tcx>) -> EvalResult<Pointer> {
1167 use rustc::mir::repr::Operand::*;
1169 Consume(ref lvalue) => Ok(self.eval_lvalue(lvalue)?.to_ptr()),
1170 Constant(mir::Constant { ref literal, .. }) => {
1171 use rustc::mir::repr::Literal::*;
1173 Value { ref value } => Ok(self.const_to_ptr(value)?),
1174 Item { def_id, substs } => {
// Substitute the item's generic parameters before inspecting its type.
1175 let item_ty = self.tcx.lookup_item_type(def_id).subst(self.tcx, substs);
1176 if item_ty.ty.is_fn() {
1177 Err(EvalError::Unimplemented("unimplemented: mentions of function items".to_string()))
// Non-fn items are statics/consts, looked up in the precomputed cache.
1179 let cid = ConstantId::Static{ def_id: def_id, substs: substs };
1180 Ok(*self.statics.get(&cid).expect("static should have been cached (rvalue)"))
1183 Promoted { index } => {
// Promoteds are keyed by the enclosing function and its substs.
1184 let cid = ConstantId::Promoted {
1185 def_id: self.frame().def_id,
1186 substs: self.substs(),
1189 Ok(*self.statics.get(&cid).expect("a promoted constant hasn't been precomputed"))
/// Evaluates a MIR lvalue to an `Lvalue`: a pointer into interpreter
/// memory plus optional metadata (slice length or downcast variant).
/// Locals are resolved through the current frame's flat `locals`
/// array; statics through the global `statics` cache; projections
/// recurse on their base lvalue.
1196 fn eval_lvalue(&mut self, lvalue: &mir::Lvalue<'tcx>) -> EvalResult<Lvalue> {
1197 use rustc::mir::repr::Lvalue::*;
1198 let ptr = match *lvalue {
1199 ReturnPointer => self.frame().return_ptr
1200 .expect("ReturnPointer used in a function with no return value"),
// Args, vars and temps share one locals array, partitioned by offsets.
1201 Arg(i) => self.frame().locals[i as usize],
1202 Var(i) => self.frame().locals[self.frame().var_offset + i as usize],
1203 Temp(i) => self.frame().locals[self.frame().temp_offset + i as usize],
// Statics are non-generic, so empty substs are used for the cache key.
1206 let substs = self.tcx.mk_substs(subst::Substs::empty());
1207 let cid = ConstantId::Static{ def_id: def_id, substs: substs };
1208 *self.gecx.statics.get(&cid).expect("static should have been cached (lvalue)")
1211 Projection(ref proj) => {
1212 let base = self.eval_lvalue(&proj.base)?;
1213 let base_ty = self.lvalue_ty(&proj.base);
1214 let base_layout = self.type_layout(base_ty);
1216 use rustc::mir::repr::ProjectionElem::*;
1218 Field(field, _) => {
1219 use rustc::ty::layout::Layout::*;
// Find the struct-like layout that owns the field, then add its offset.
1220 let variant = match *base_layout {
1221 Univariant { ref variant, .. } => variant,
1222 General { ref variants, .. } => {
// Field access into a General enum requires a prior Downcast to have
// recorded which variant is active.
1223 if let LvalueExtra::DowncastVariant(variant_idx) = base.extra {
1224 &variants[variant_idx]
1226 panic!("field access on enum had no variant index");
1229 RawNullablePointer { .. } => {
1230 assert_eq!(field.index(), 0);
1233 StructWrappedNullablePointer { ref nonnull, .. } => nonnull,
1234 _ => panic!("field access on non-product type: {:?}", base_layout),
1237 let offset = variant.field_offset(field.index()).bytes();
1238 base.ptr.offset(offset as isize)
1241 Downcast(_, variant) => {
1242 use rustc::ty::layout::Layout::*;
1243 match *base_layout {
// For a General enum, the payload starts after the discriminant;
// remember the variant so later Field projections can use it.
1244 General { discr, .. } => {
1246 ptr: base.ptr.offset(discr.size().bytes() as isize),
1247 extra: LvalueExtra::DowncastVariant(variant),
// Nullable-pointer layouts store the payload at offset 0.
1250 RawNullablePointer { .. } | StructWrappedNullablePointer { .. } => {
1253 _ => panic!("variant downcast on non-aggregate: {:?}", base_layout),
// Deref loads the pointer stored at `base.ptr`; fat pointers carry
// their length in the following word.
1258 let pointee_ty = pointee_type(base_ty).expect("Deref of non-pointer");
1259 let ptr = self.memory.read_ptr(base.ptr)?;
1260 let extra = match pointee_ty.sty {
1261 ty::TySlice(_) | ty::TyStr => {
1262 let len_ptr = base.ptr.offset(self.memory.pointer_size as isize);
1263 let len = self.memory.read_usize(len_ptr)?;
1264 LvalueExtra::Length(len)
1266 ty::TyTrait(_) => unimplemented!(),
1267 _ => LvalueExtra::None,
1269 return Ok(Lvalue { ptr: ptr, extra: extra });
// Runtime indexing: evaluate the index operand, scale by element size.
1272 Index(ref operand) => {
1273 let elem_size = match base_ty.sty {
1274 ty::TyArray(elem_ty, _) |
1275 ty::TySlice(elem_ty) => self.type_size(elem_ty),
1276 _ => panic!("indexing expected an array or slice, got {:?}", base_ty),
1278 let n_ptr = self.eval_operand(operand)?;
1279 let n = self.memory.read_usize(n_ptr)?;
// NOTE(review): no bounds check against the slice length here — confirm
// whether out-of-range indices are caught elsewhere (e.g. by memory).
1280 base.ptr.offset(n as isize * elem_size as isize)
1283 ConstantIndex { .. } => unimplemented!(),
1288 Ok(Lvalue { ptr: ptr, extra: LvalueExtra::None })
1291 // TODO(solson): Try making const_to_primval instead.
/// Allocates interpreter memory for a rustc `ConstVal` and writes the
/// value into it, returning a pointer to the fresh allocation.
1292 fn const_to_ptr(&mut self, const_val: &const_val::ConstVal) -> EvalResult<Pointer> {
1293 use rustc::middle::const_val::ConstVal::*;
1295 Float(_f) => unimplemented!(),
1297 // TODO(solson): Check int constant type.
// NOTE(review): always allocates 8 bytes regardless of the constant's
// actual integer type (see TODO above).
1298 let ptr = self.memory.allocate(8);
1299 self.memory.write_uint(ptr, int.to_u64_unchecked(), 8)?;
// A string constant becomes a fat pointer (data ptr, length) pointing
// at a separate allocation holding the bytes.
1303 let psize = self.memory.pointer_size;
1304 let static_ptr = self.memory.allocate(s.len());
1305 let ptr = self.memory.allocate(psize * 2);
1306 self.memory.write_bytes(static_ptr, s.as_bytes())?;
1307 self.memory.write_ptr(ptr, static_ptr)?;
1308 self.memory.write_usize(ptr.offset(psize as isize), s.len() as u64)?;
1311 ByteStr(ref bs) => {
// A byte-string constant is a thin pointer — only the data pointer is
// written, no length word (its type is &[u8; N]).
1312 let psize = self.memory.pointer_size;
1313 let static_ptr = self.memory.allocate(bs.len());
1314 let ptr = self.memory.allocate(psize);
1315 self.memory.write_bytes(static_ptr, bs)?;
1316 self.memory.write_ptr(ptr, static_ptr)?;
1320 let ptr = self.memory.allocate(1);
1321 self.memory.write_bool(ptr, b)?;
1324 Char(_c) => unimplemented!(),
1325 Struct(_node_id) => unimplemented!(),
1326 Tuple(_node_id) => unimplemented!(),
1327 Function(_def_id) => unimplemented!(),
1328 Array(_, _) => unimplemented!(),
1329 Repeat(_, _) => unimplemented!(),
1330 Dummy => unimplemented!(),
/// Returns the monomorphized type of `lvalue` in the current frame.
1334 fn lvalue_ty(&self, lvalue: &mir::Lvalue<'tcx>) -> Ty<'tcx> {
1335 self.monomorphize(self.mir().lvalue_ty(self.tcx, lvalue).to_ty(self.tcx))
/// Returns the monomorphized type of `operand` in the current frame.
1338 fn operand_ty(&self, operand: &mir::Operand<'tcx>) -> Ty<'tcx> {
1339 self.monomorphize(self.mir().operand_ty(self.tcx, operand))
/// Applies the current frame's substitutions to `ty` and normalizes
/// any associated types, yielding a fully concrete type.
1342 fn monomorphize(&self, ty: Ty<'tcx>) -> Ty<'tcx> {
1343 let substituted = ty.subst(self.tcx, self.substs());
1344 self.tcx.normalize_associated_type(&substituted)
/// Whether values of `ty` require drop glue, judged in an empty
/// parameter environment (no where-clauses in scope).
1347 fn type_needs_drop(&self, ty: Ty<'tcx>) -> bool {
1348 self.tcx.type_needs_drop_given_env(ty, &self.tcx.empty_parameter_environment())
/// Moves a value of type `ty` from `src` to `dest`: copies the bytes,
/// then drop-fills the source if the type has drop glue, so the moved-
/// out-of location can no longer be treated as initialized.
1351 fn move_(&mut self, src: Pointer, dest: Pointer, ty: Ty<'tcx>) -> EvalResult<()> {
1352 let size = self.type_size(ty);
1353 self.memory.copy(src, dest, size)?;
1354 if self.type_needs_drop(ty) {
1355 self.memory.drop_fill(src, size)?;
/// Whether `ty` is `Sized`, judged in an empty parameter environment.
1360 fn type_is_sized(&self, ty: Ty<'tcx>) -> bool {
1361 ty.is_sized(self.tcx, &self.tcx.empty_parameter_environment(), DUMMY_SP)
/// Size of `ty` in bytes, per its computed layout on the target.
1364 fn type_size(&self, ty: Ty<'tcx>) -> usize {
1365 self.type_layout(ty).size(&self.tcx.data_layout).bytes() as usize
/// Computes (monomorphizing first) the target layout of `ty`.
/// Spins up a fresh normalizing inference context on every call — see
/// the TODO below regarding the cost of doing so.
1368 fn type_layout(&self, ty: Ty<'tcx>) -> &'tcx Layout {
1369 // TODO(solson): Is this inefficient? Needs investigation.
1370 let ty = self.monomorphize(ty);
1372 self.tcx.normalizing_infer_ctxt(ProjectionMode::Any).enter(|infcx| {
1373 // TODO(solson): Report this error properly.
1374 ty.layout(&infcx).unwrap()
/// Reads a primitive value of type `ty` from interpreter memory at
/// `ptr` and wraps it in a `PrimVal`. Pointer-sized ints (`isize` /
/// `usize`) are matched against the target's pointer size (the first
/// tuple element) to select the concrete width.
1378 pub fn read_primval(&mut self, ptr: Pointer, ty: Ty<'tcx>) -> EvalResult<PrimVal> {
1379 use syntax::ast::{IntTy, UintTy};
1380 let val = match (self.memory.pointer_size, &ty.sty) {
1381 (_, &ty::TyBool) => PrimVal::Bool(self.memory.read_bool(ptr)?),
1382 (_, &ty::TyInt(IntTy::I8)) => PrimVal::I8(self.memory.read_int(ptr, 1)? as i8),
1383 (2, &ty::TyInt(IntTy::Is)) |
1384 (_, &ty::TyInt(IntTy::I16)) => PrimVal::I16(self.memory.read_int(ptr, 2)? as i16),
1385 (4, &ty::TyInt(IntTy::Is)) |
1386 (_, &ty::TyInt(IntTy::I32)) => PrimVal::I32(self.memory.read_int(ptr, 4)? as i32),
1387 (8, &ty::TyInt(IntTy::Is)) |
1388 (_, &ty::TyInt(IntTy::I64)) => PrimVal::I64(self.memory.read_int(ptr, 8)? as i64),
1389 (_, &ty::TyUint(UintTy::U8)) => PrimVal::U8(self.memory.read_uint(ptr, 1)? as u8),
1390 (2, &ty::TyUint(UintTy::Us)) |
1391 (_, &ty::TyUint(UintTy::U16)) => PrimVal::U16(self.memory.read_uint(ptr, 2)? as u16),
1392 (4, &ty::TyUint(UintTy::Us)) |
1393 (_, &ty::TyUint(UintTy::U32)) => PrimVal::U32(self.memory.read_uint(ptr, 4)? as u32),
1394 (8, &ty::TyUint(UintTy::Us)) |
1395 (_, &ty::TyUint(UintTy::U64)) => PrimVal::U64(self.memory.read_uint(ptr, 8)? as u64),
1397 (_, &ty::TyRef(_, ty::TypeAndMut { ty, .. })) |
1398 (_, &ty::TyRawPtr(ty::TypeAndMut { ty, .. })) => {
// Thin pointers may be either abstract (allocation-based) pointers or
// raw integer addresses; a ReadBytesAsPointer error signals the latter.
1399 if self.type_is_sized(ty) {
1400 match self.memory.read_ptr(ptr) {
1401 Ok(p) => PrimVal::AbstractPtr(p),
1402 Err(EvalError::ReadBytesAsPointer) => {
1403 PrimVal::IntegerPtr(self.memory.read_usize(ptr)?)
1405 Err(e) => return Err(e),
1408 return Err(EvalError::Unimplemented(format!("unimplemented: primitive read of fat pointer type: {:?}", ty)));
1412 _ => panic!("primitive read of non-primitive type: {:?}", ty),
/// The innermost (currently executing) call frame.
1417 fn frame(&self) -> &Frame<'mir, 'tcx> {
1418 self.stack.last().expect("no call frames exist")
/// The basic block the current frame is about to execute.
1421 fn basic_block(&self) -> &mir::BasicBlockData<'tcx> {
1422 let frame = self.frame();
1423 frame.mir.basic_block_data(frame.next_block)
/// Mutable access to the innermost call frame.
1426 fn frame_mut(&mut self) -> &mut Frame<'mir, 'tcx> {
1427 self.stack.last_mut().expect("no call frames exist")
/// The MIR of the function in the current frame (cheap clone — either
/// a reference or an `Rc` bump, per `CachedMir`).
1430 fn mir(&self) -> CachedMir<'mir, 'tcx> {
1431 self.frame().mir.clone()
1434 fn substs(&self) -> &'tcx Substs<'tcx> {
/// Fetches the MIR for `def_id`: crate-local items come straight from
/// the `mir_map`; external items are loaded from the crate store and
/// memoized in `mir_cache` so metadata is decoded at most once.
1438 fn load_mir(&self, def_id: DefId) -> CachedMir<'mir, 'tcx> {
1439 match self.tcx.map.as_local_node_id(def_id) {
1440 Some(node_id) => CachedMir::Ref(self.mir_map.map.get(&node_id).unwrap()),
1442 let mut mir_cache = self.mir_cache.borrow_mut();
1443 if let Some(mir) = mir_cache.get(&def_id) {
1444 return CachedMir::Owned(mir.clone());
1447 let cs = &self.tcx.sess.cstore;
// Panic rather than limp along: interpreting without MIR is impossible.
1448 let mir = cs.maybe_get_item_mir(self.tcx, def_id).unwrap_or_else(|| {
1449 panic!("no mir for {:?}", def_id);
1451 let cached = Rc::new(mir);
1452 mir_cache.insert(def_id, cached.clone());
1453 CachedMir::Owned(cached)
/// Returns the pointed-to type for reference and raw-pointer types;
/// `None` for anything else.
1459 fn pointee_type(ptr_ty: ty::Ty) -> Option<ty::Ty> {
1461 ty::TyRef(_, ty::TypeAndMut { ty, .. }) |
1462 ty::TyRawPtr(ty::TypeAndMut { ty, .. }) |
/// Extracts the bare pointer from an `Lvalue`, asserting that it
/// carries no extra metadata (no slice length or downcast variant).
1471 fn to_ptr(self) -> Pointer {
1472 assert_eq!(self.extra, LvalueExtra::None);
// Deref lets `CachedMir` be used transparently wherever a `&Mir` is
// expected, regardless of whether the MIR is borrowed or `Rc`-owned.
1477 impl<'mir, 'tcx: 'mir> Deref for CachedMir<'mir, 'tcx> {
1478 type Target = mir::Mir<'tcx>;
1479 fn deref(&self) -> &mir::Mir<'tcx> {
1481 CachedMir::Ref(r) => r,
1482 CachedMir::Owned(ref rc) => rc,
/// The result of resolving a trait method against a concrete impl:
/// the method definition, its translated substitutions, and whether
/// the resolution landed on a provided (default) trait method.
1488 pub struct ImplMethod<'tcx> {
1489 pub method: Rc<ty::Method<'tcx>>,
1490 pub substs: &'tcx Substs<'tcx>,
1491 pub is_provided: bool,
1494 /// Locates the applicable definition of a method, given its name.
/// Walks the impl's specialization ancestors to find the method,
/// translating `substs` from the impl's context into the defining
/// node's context. Panics (`bug!`) if the method does not exist in
/// the impl or its trait.
1495 pub fn get_impl_method<'a, 'tcx>(
1496 tcx: TyCtxt<'a, 'tcx, 'tcx>,
1498 substs: &'tcx Substs<'tcx>,
1500 ) -> ImplMethod<'tcx> {
// Inference variables must be resolved before method resolution.
1501 assert!(!substs.types.needs_infer());
1503 let trait_def_id = tcx.trait_id_of_impl(impl_def_id).unwrap();
1504 let trait_def = tcx.lookup_trait_def(trait_def_id);
1506 match trait_def.ancestors(impl_def_id).fn_defs(tcx, name).next() {
1507 Some(node_item) => {
1508 let substs = tcx.normalizing_infer_ctxt(ProjectionMode::Any).enter(|infcx| {
1509 let substs = traits::translate_substs(&infcx, impl_def_id,
1510 substs, node_item.node);
// Lift back out of the inference context; failure means inference
// residue leaked through translation, which is a compiler bug.
1511 tcx.lift(&substs).unwrap_or_else(|| {
1512 bug!("trans::meth::get_impl_method: translate_substs \
1513 returned {:?} which contains inference types/regions",
1518 method: node_item.item,
1520 is_provided: node_item.node.is_from_trait(),
1524 bug!("method {:?} not found in {:?}", name, impl_def_id)
/// Entry point: scans every crate-local MIR body for the `#[miri_run]`
/// attribute and interprets each tagged function, dumping the returned
/// allocation at debug log level.
1529 pub fn interpret_start_points<'a, 'tcx>(
1530 tcx: TyCtxt<'a, 'tcx, 'tcx>,
1531 mir_map: &MirMap<'tcx>,
1533 let initial_indentation = ::log_settings::settings().indentation;
1534 for (&id, mir) in &mir_map.map {
1535 for attr in tcx.map.attrs(id) {
1536 use syntax::attr::AttrMetaMethods;
1537 if attr.check_name("miri_run") {
1538 let item = tcx.map.expect_item(id);
// Reset log indentation so each interpreted item starts fresh.
1540 ::log_settings::settings().indentation = initial_indentation;
1542 debug!("Interpreting: {}", item.name);
// Each start point gets its own evaluation context (fresh memory and
// statics) — state does not leak between #[miri_run] functions.
1544 let mut gecx = GlobalEvalContext::new(tcx, mir_map);
1545 match gecx.call(mir, tcx.map.local_def_id(id)) {
1546 Ok(Some(return_ptr)) => if log_enabled!(::log::LogLevel::Debug) {
1547 gecx.memory.dump(return_ptr.alloc_id);
1549 Ok(None) => warn!("diverging function returned"),
1551 // TODO(solson): Detect whether the error was already reported or not.
1552 // tcx.sess.err(&e.to_string());
1560 // TODO(solson): Upstream these methods into rustc::ty::layout.
/// Size in bytes of this discriminant integer width.
1563 fn size(self) -> Size;
1566 impl IntegerExt for layout::Integer {
1567 fn size(self) -> Size {
1568 use rustc::ty::layout::Integer::*;
// I1 is stored in a full byte, same as I8.
1570 I1 | I8 => Size::from_bits(8),
1571 I16 => Size::from_bits(16),
1572 I32 => Size::from_bits(32),
1573 I64 => Size::from_bits(64),
/// Byte offset of the struct field at `index`.
1579 fn field_offset(&self, index: usize) -> Size;
1582 impl StructExt for layout::Struct {
1583 fn field_offset(&self, index: usize) -> Size {
// `offset_after_field[i]` records the end of field i, which is the
// start of field i + 1; field 0 (handled in the missing branch above,
// presumably) starts at offset 0 — TODO confirm against full source.
1587 self.offset_after_field[index - 1]