1 use std::collections::{HashMap, HashSet};
4 use rustc::hir::def_id::DefId;
5 use rustc::hir::map::definitions::DefPathData;
6 use rustc::middle::const_val::ConstVal;
7 use rustc::middle::region::CodeExtent;
9 use rustc::traits::Reveal;
10 use rustc::ty::layout::{self, Layout, Size};
11 use rustc::ty::subst::{Subst, Substs, Kind};
12 use rustc::ty::{self, Ty, TyCtxt, TypeFoldable, Binder};
14 use rustc_data_structures::indexed_vec::Idx;
15 use syntax::codemap::{self, DUMMY_SP, Span};
16 use syntax::ast::{self, Mutability};
20 EvalError, EvalResult,
21 Global, GlobalId, Lvalue, LvalueExtra,
22 Memory, MemoryPointer, HasMemory,
25 PrimVal, PrimValKind, Value, Pointer,
// Core interpreter state for one MIR-evaluation session, generic over the
// `Machine` `M` (which customizes behavior, e.g. const-eval vs. full miri).
// NOTE(review): this excerpt elides several lines of the declaration,
// including the closing brace — do not assume the field list is complete.
30 pub struct EvalContext<'a, 'tcx: 'a, M: Machine<'tcx>> {
31 /// Stores data required by the `Machine`
32 pub machine_data: M::Data,
34 /// The results of the type checker, from rustc.
35 pub tcx: TyCtxt<'a, 'tcx, 'tcx>,
37 /// The virtual memory system.
38 pub memory: Memory<'a, 'tcx, M>,
41 // FIXME(@RalfJung): validation branch
42 /// Lvalues that were suspended by the validation subsystem, and will be recovered later
43 pub(crate) suspended: HashMap<DynamicLifetime, Vec<ValidationQuery<'tcx>>>,
45 /// Precomputed statics, constants and promoteds.
46 pub globals: HashMap<GlobalId<'tcx>, Global<'tcx>>,
48 /// The virtual call stack.
49 pub(crate) stack: Vec<Frame<'tcx>>,
51 /// The maximum number of stack frames allowed
52 pub(crate) stack_limit: usize,
54 /// The maximum number of operations that may be executed.
55 /// This prevents infinite loops and huge computations from freezing up const eval.
56 /// Remove once halting problem is solved.
57 pub(crate) steps_remaining: u64,
// One entry of the virtual call stack: the MIR body being executed plus
// callsite info, the return destination, the locals, and the current
// position within the body.
// NOTE(review): the excerpt cuts off after the "currently evaluated
// statement" doc line — the statement-index field and the closing brace
// are elided here.
61 pub struct Frame<'tcx> {
62 ////////////////////////////////////////////////////////////////////////////////
63 // Function and callsite information
64 ////////////////////////////////////////////////////////////////////////////////
66 /// The MIR for the function called on this frame.
67 pub mir: &'tcx mir::Mir<'tcx>,
69 /// The def_id and substs of the current function
70 pub instance: ty::Instance<'tcx>,
72 /// The span of the call site.
73 pub span: codemap::Span,
75 ////////////////////////////////////////////////////////////////////////////////
76 // Return lvalue and locals
77 ////////////////////////////////////////////////////////////////////////////////
79 /// The block to return to when returning from the current stack frame
80 pub return_to_block: StackPopCleanup,
82 /// The location where the result of the current stack frame should be written to.
83 pub return_lvalue: Lvalue<'tcx>,
85 /// The list of locals for this stack frame, stored in order as
86 /// `[arguments..., variables..., temporaries...]`. The locals are stored as `Option<Value>`s.
87 /// `None` represents a local that is currently dead, while a live local
88 /// can either directly contain `PrimVal` or refer to some part of an `Allocation`.
90 /// Before being initialized, arguments are `Value::ByVal(PrimVal::Undef)` and other locals are `None`.
91 pub locals: Vec<Option<Value>>,
93 ////////////////////////////////////////////////////////////////////////////////
94 // Current position within the function
95 ////////////////////////////////////////////////////////////////////////////////
97 /// The block that is currently executed (or will be executed after the above call stacks
99 pub block: mir::BasicBlock,
101 /// The index of the currently evaluated statment.
// Describes what must happen when a stack frame is popped; consumed by
// `pop_stack_frame`. NOTE(review): the `None` variant (documented by the
// last line below) and the closing brace are elided in this excerpt.
105 #[derive(Clone, Debug, Eq, PartialEq, Hash)]
106 pub enum StackPopCleanup {
107 /// The stackframe existed to compute the initial value of a static/constant, make sure it
108 /// isn't modifyable afterwards in case of constants.
109 /// In case of `static mut`, mark the memory to ensure it's never marked as immutable through
110 /// references or deallocated
111 MarkStatic(Mutability),
112 /// A regular stackframe added due to a function call will need to get forwarded to the next
114 Goto(mir::BasicBlock),
115 /// The main function and diverging functions have nowhere to return to
// Key type used by the validation subsystem's `suspended` map.
// NOTE(review): at least one field (likely the frame index) and the
// closing brace are elided in this excerpt.
119 #[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
120 pub struct DynamicLifetime {
122 pub region: Option<CodeExtent>, // "None" indicates "until the function ends"
// Knobs bounding an evaluation: memory budget, stack depth, and (per the
// `Default` impl below) a step limit. NOTE(review): the `step_limit`
// field line and the closing brace are elided in this excerpt.
125 #[derive(Copy, Clone, Debug)]
126 pub struct ResourceLimits {
127 pub memory_size: u64,
129 pub stack_limit: usize,
// Default limits: 100 MB of interpreter memory and one million steps.
// NOTE(review): the `stack_limit` initializer and closing braces are
// elided in this excerpt.
132 impl Default for ResourceLimits {
133 fn default() -> Self {
135 memory_size: 100 * 1024 * 1024, // 100 MB
136 step_limit: 1_000_000,
// Pairs a type with a "was any enclosing struct packed" flag; produced by
// `get_field_ty` / `field_path_offset_and_ty`. NOTE(review): the `ty` and
// `packed` field lines are elided in this excerpt.
142 #[derive(Copy, Clone, Debug)]
143 pub struct TyAndPacked<'tcx> {
148 impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
// Constructor: wires `ResourceLimits` into the memory system, the stack
// limit, and the step counter. NOTE(review): the `pub fn new(` signature
// line and the struct-literal framing are elided in this excerpt.
150 tcx: TyCtxt<'a, 'tcx, 'tcx>,
151 limits: ResourceLimits,
152 machine_data: M::Data,
153 memory_data: M::MemoryData,
158 memory: Memory::new(&tcx.data_layout, limits.memory_size, memory_data),
159 suspended: HashMap::new(),
160 globals: HashMap::new(),
162 stack_limit: limits.stack_limit,
163 steps_remaining: limits.step_limit,
/// Allocates stack memory for `ty` using the current frame's substitutions.
/// Delegates to `alloc_ptr_with_substs`.
167 pub fn alloc_ptr(&mut self, ty: Ty<'tcx>) -> EvalResult<'tcx, MemoryPointer> {
168 let substs = self.substs();
169 self.alloc_ptr_with_substs(ty, substs)
/// Allocates `MemoryKind::Stack` memory sized/aligned for `ty` after
/// substituting `substs`. Panics (via `expect`) if `ty` is unsized, since
/// an unsized type has no statically known size to allocate.
172 pub fn alloc_ptr_with_substs(
// NOTE(review): the `&mut self` / `ty` parameter lines are elided here.
175 substs: &'tcx Substs<'tcx>
176 ) -> EvalResult<'tcx, MemoryPointer> {
177 let size = self.type_size_with_substs(ty, substs)?.expect("cannot alloc memory for unsized type");
178 let align = self.type_align_with_substs(ty, substs)?;
179 self.memory.allocate(size, align, MemoryKind::Stack)
/// Shared access to the virtual memory system (body line elided in this excerpt).
182 pub fn memory(&self) -> &Memory<'a, 'tcx, M> {
/// Mutable access to the virtual memory system (body line elided in this excerpt).
186 pub fn memory_mut(&mut self) -> &mut Memory<'a, 'tcx, M> {
/// Read-only view of the virtual call stack (body line elided in this excerpt).
190 pub fn stack(&self) -> &[Frame<'tcx>] {
/// Index of the innermost (currently executing) stack frame.
/// Asserts the stack is non-empty; presumably returns `stack.len() - 1`
/// on the elided line — TODO confirm.
195 pub fn cur_frame(&self) -> usize {
196 assert!(self.stack.len() > 0);
200 /// Returns true if the current frame or any parent frame is part of a ctfe.
202 /// Used to disable features in const eval, which do not have a rfc enabling
203 /// them or which can't be written in a way that they produce the same output
204 /// that evaluating the code at runtime would produce.
205 pub fn const_env(&self) -> bool {
// Walk the stack innermost-first; a `MarkStatic` cleanup marks a
// const/static computation frame. NOTE(review): the `return true` and
// trailing `false` lines are elided in this excerpt.
206 for frame in self.stack.iter().rev() {
207 if let StackPopCleanup::MarkStatic(_) = frame.return_to_block {
/// Builds a fat-pointer `Value` for a string literal: a cached allocation
/// of the UTF-8 bytes paired with the byte length.
214 pub(crate) fn str_to_value(&mut self, s: &str) -> EvalResult<'tcx, Value> {
215 let ptr = self.memory.allocate_cached(s.as_bytes())?;
216 Ok(Value::ByValPair(PrimVal::Ptr(ptr), PrimVal::from_u128(s.len() as u128)))
/// Converts a type-checked `ConstVal` from rustc into the interpreter's
/// `Value` representation. Scalar-like constants become `Value::ByVal`;
/// strings/byte-strings allocate cached memory; aggregate constants
/// (Variant/Struct/Tuple/Array/Repeat) are unimplemented here.
219 pub(super) fn const_to_value(&mut self, const_val: &ConstVal<'tcx>) -> EvalResult<'tcx, Value> {
220 use rustc::middle::const_val::ConstVal::*;
221 use rustc_const_math::ConstFloat;
223 let primval = match *const_val {
224 Integral(const_int) => PrimVal::Bytes(const_int.to_u128_unchecked()),
226 Float(ConstFloat::F32(f)) => PrimVal::from_f32(f),
227 Float(ConstFloat::F64(f)) => PrimVal::from_f64(f),
229 Bool(b) => PrimVal::from_bool(b),
230 Char(c) => PrimVal::from_char(c),
232 Str(ref s) => return self.str_to_value(s),
// NOTE(review): the `ByteStr` match-arm header is elided here; `bs`
// below is presumably the byte-string contents — confirm upstream.
235 let ptr = self.memory.allocate_cached(bs)?;
239 Variant(_) => unimplemented!(),
240 Struct(_) => unimplemented!(),
241 Tuple(_) => unimplemented!(),
242 // function items are zero sized and thus have no readable value
243 Function(..) => PrimVal::Undef,
244 Array(_) => unimplemented!(),
245 Repeat(_, _) => unimplemented!(),
248 Ok(Value::ByVal(primval))
/// Whether `ty` is `Sized`. Must only be called on fully monomorphized
/// types (asserted via `needs_subst`), since sizedness of a generic
/// parameter is unknowable.
251 pub(super) fn type_is_sized(&self, ty: Ty<'tcx>) -> bool {
252 // generics are weird, don't run this function on a generic
253 assert!(!ty.needs_subst());
254 ty.is_sized(self.tcx, ty::ParamEnv::empty(Reveal::All), DUMMY_SP)
/// Fetches the MIR body for `instance`. Plain items go through
/// `maybe_optimized_mir` (reporting `NoMirFor` when rustc has none, e.g.
/// extern functions); shims and other instance kinds use `instance_mir`.
257 pub fn load_mir(&self, instance: ty::InstanceDef<'tcx>) -> EvalResult<'tcx, &'tcx mir::Mir<'tcx>> {
258 trace!("load mir {:?}", instance);
// NOTE(review): the `match instance {` line is elided in this excerpt.
260 ty::InstanceDef::Item(def_id) => self.tcx.maybe_optimized_mir(def_id).ok_or_else(|| EvalError::NoMirFor(self.tcx.item_path_str(def_id))),
261 _ => Ok(self.tcx.instance_mir(instance)),
/// Fully concretizes `ty`: erases regions, applies `substs`, and
/// normalizes associated types, yielding a type safe to pass to the
/// layout/size helpers (which assert `!needs_subst()`).
265 pub fn monomorphize(&self, ty: Ty<'tcx>, substs: &'tcx Substs<'tcx>) -> Ty<'tcx> {
266 // miri doesn't care about lifetimes, and will choke on some crazy ones
267 // let's simply get rid of them
268 let without_lifetimes = self.tcx.erase_regions(&ty);
269 let substituted = without_lifetimes.subst(self.tcx, substs);
270 self.tcx.normalize_associated_type(&substituted)
/// Strips a `Binder` by erasing its late-bound regions, then erases all
/// remaining regions — miri ignores lifetimes entirely.
273 pub fn erase_lifetimes<T>(&self, value: &Binder<T>) -> T
274 where T : TypeFoldable<'tcx>
276 let value = self.tcx.erase_late_bound_regions(value);
277 self.tcx.erase_regions(&value)
/// Size of `ty` in bytes under the current frame's substs; `None` for
/// unsized types.
280 pub(super) fn type_size(&self, ty: Ty<'tcx>) -> EvalResult<'tcx, Option<u64>> {
281 self.type_size_with_substs(ty, self.substs())
/// ABI alignment of `ty` under the current frame's substs.
284 pub(super) fn type_align(&self, ty: Ty<'tcx>) -> EvalResult<'tcx, u64> {
285 self.type_align_with_substs(ty, self.substs())
/// Computes the layout size of `ty` after substitution: `Ok(None)` when
/// the layout is unsized, otherwise the byte size.
288 fn type_size_with_substs(
// NOTE(review): the `&self` / `ty` parameter lines are elided here.
291 substs: &'tcx Substs<'tcx>,
292 ) -> EvalResult<'tcx, Option<u64>> {
293 let layout = self.type_layout_with_substs(ty, substs)?;
294 if layout.is_unsized() {
// NOTE(review): the `Ok(None)` arm and `else` framing are elided here.
297 Ok(Some(layout.size(&self.tcx.data_layout).bytes()))
/// ABI alignment of `ty` after substitution, derived from its layout.
301 fn type_align_with_substs(&self, ty: Ty<'tcx>, substs: &'tcx Substs<'tcx>) -> EvalResult<'tcx, u64> {
302 self.type_layout_with_substs(ty, substs).map(|layout| layout.align(&self.tcx.data_layout).abi())
/// Layout of `ty` under the current frame's substs.
305 pub fn type_layout(&self, ty: Ty<'tcx>) -> EvalResult<'tcx, &'tcx Layout> {
306 self.type_layout_with_substs(ty, self.substs())
/// Monomorphizes `ty` with `substs` and queries rustc for its layout,
/// mapping layout failures into `EvalError::Layout`.
309 fn type_layout_with_substs(&self, ty: Ty<'tcx>, substs: &'tcx Substs<'tcx>) -> EvalResult<'tcx, &'tcx Layout> {
310 // TODO(solson): Is this inefficient? Needs investigation.
311 let ty = self.monomorphize(ty, substs);
313 ty.layout(self.tcx, ty::ParamEnv::empty(Reveal::All)).map_err(EvalError::Layout)
/// Pushes a new `Frame` for `instance`/`mir`. Locals without any
/// StorageLive/StorageDead annotation are considered live for the whole
/// function and pre-initialized to `Undef`; annotated locals start dead
/// (`None`). Fails with `StackFrameLimitReached` when the stack grows
/// past `stack_limit`. NOTE(review): several lines (frame construction
/// fields, final Ok) are elided in this excerpt.
316 pub fn push_stack_frame(
318 instance: ty::Instance<'tcx>,
320 mir: &'tcx mir::Mir<'tcx>,
321 return_lvalue: Lvalue<'tcx>,
322 return_to_block: StackPopCleanup,
323 ) -> EvalResult<'tcx> {
324 ::log_settings::settings().indentation += 1;
326 /// Return the set of locals that have a storage annotation anywhere
327 fn collect_storage_annotations<'tcx>(mir: &'tcx mir::Mir<'tcx>) -> HashSet<mir::Local> {
328 use rustc::mir::StatementKind::*;
330 let mut set = HashSet::new();
331 for block in mir.basic_blocks() {
332 for stmt in block.statements.iter() {
// NOTE(review): the `match stmt.kind {` line is elided here.
334 StorageLive(mir::Lvalue::Local(local)) | StorageDead(mir::Lvalue::Local(local)) => {
344 // Subtract 1 because `local_decls` includes the ReturnMemoryPointer, but we don't store a local
346 let annotated_locals = collect_storage_annotations(mir);
347 let num_locals = mir.local_decls.len() - 1;
348 let mut locals = vec![None; num_locals];
349 for i in 0..num_locals {
// `i + 1` skips past the return place (local 0), matching the -1 above.
350 let local = mir::Local::new(i+1);
351 if !annotated_locals.contains(&local) {
352 locals[i] = Some(Value::ByVal(PrimVal::Undef));
356 self.stack.push(Frame {
358 block: mir::START_BLOCK,
// Tell the memory/lock subsystem which frame is now current.
367 let cur_frame = self.cur_frame();
368 self.memory.set_cur_frame(cur_frame);
370 if self.stack.len() > self.stack_limit {
371 Err(EvalError::StackFrameLimitReached)
/// Pops the innermost frame and performs its `StackPopCleanup`:
/// `MarkStatic` finalizes a computed global (marking its memory static /
/// immutable as appropriate), `Goto` jumps the caller to the target
/// block, `None` does nothing. Finally deallocates the popped frame's
/// stack-backed locals. NOTE(review): several closing-brace / trailing
/// lines are elided in this excerpt.
377 pub(super) fn pop_stack_frame(&mut self) -> EvalResult<'tcx> {
378 ::log_settings::settings().indentation -= 1;
379 self.memory.locks_lifetime_ended(None);
380 let frame = self.stack.pop().expect("tried to pop a stack frame, but there were none");
381 if !self.stack.is_empty() {
382 // TODO: IS this the correct time to start considering these accesses as originating from the returned-to stack frame?
383 let cur_frame = self.cur_frame();
384 self.memory.set_cur_frame(cur_frame);
386 match frame.return_to_block {
387 StackPopCleanup::MarkStatic(mutable) => if let Lvalue::Global(id) = frame.return_lvalue {
388 let global_value = self.globals.get_mut(&id)
389 .expect("global should have been cached (static)")
390 match global_value.value {
391 // FIXME: to_ptr()? might be too extreme here, static zsts might reach this under certain conditions
392 Value::ByRef { ptr, aligned: _aligned } =>
393 // Alignment does not matter for this call
394 self.memory.mark_static_initalized(ptr.to_ptr()?.alloc_id, mutable)?,
395 Value::ByVal(val) => if let PrimVal::Ptr(ptr) = val {
396 self.memory.mark_inner_allocation(ptr.alloc_id, mutable)?;
398 Value::ByValPair(val1, val2) => {
// A fat pointer / pair may carry up to two pointers; mark both sides.
399 if let PrimVal::Ptr(ptr) = val1 {
400 self.memory.mark_inner_allocation(ptr.alloc_id, mutable)?;
402 if let PrimVal::Ptr(ptr) = val2 {
403 self.memory.mark_inner_allocation(ptr.alloc_id, mutable)?;
407 // see comment on `initialized` field
408 assert!(!global_value.initialized);
409 global_value.initialized = true;
410 assert_eq!(global_value.mutable, Mutability::Mutable);
411 global_value.mutable = mutable;
// NOTE(review): the `} else {` framing around this bug! is elided here.
413 bug!("StackPopCleanup::MarkStatic on: {:?}", frame.return_lvalue);
415 StackPopCleanup::Goto(target) => self.goto_block(target),
416 StackPopCleanup::None => {},
418 // deallocate all locals that are backed by an allocation
419 for local in frame.locals {
420 self.deallocate_local(local)?;
/// Frees a local's backing allocation when it is `ByRef` stack memory.
/// `ByVal` locals (or dead `None` locals) need no cleanup; static-kind
/// memory is intentionally left alone (see inline comment); any other
/// memory kind indicates an interpreter bug.
426 pub fn deallocate_local(&mut self, local: Option<Value>) -> EvalResult<'tcx> {
427 if let Some(Value::ByRef { ptr, aligned: _ }) = local {
428 trace!("deallocating local");
429 let ptr = ptr.to_ptr()?;
430 self.memory.dump_alloc(ptr.alloc_id);
431 match self.memory.get(ptr.alloc_id)?.kind {
432 // for a constant like `const FOO: &i32 = &1;` the local containing
433 // the `1` is referred to by the global. We transitively marked everything
434 // the global refers to as static itself, so we don't free it here
435 MemoryKind::Static => {}
436 MemoryKind::Stack => self.memory.deallocate(ptr, None, MemoryKind::Stack)?,
437 other => bug!("local contained non-stack memory: {:?}", other),
/// Writes an enum value: forces `dest` into memory, writes the
/// discriminant at `discr_offset`, then assigns the variant's fields via
/// `assign_fields` against a downcast lvalue. NOTE(review): several
/// parameter lines (dest, dest_ty, discr_offset, discr_val, discr_size,
/// variant_idx) are elided in this excerpt — names inferred from use.
443 pub fn assign_discr_and_fields(
448 operands: &[mir::Operand<'tcx>],
452 ) -> EvalResult<'tcx> {
454 let dest_ptr = self.force_allocation(dest)?.to_ptr()?;
456 let discr_dest = dest_ptr.offset(discr_offset, &self)?;
457 self.memory.write_uint(discr_dest, discr_val, discr_size)?;
459 let dest = Lvalue::Ptr {
460 ptr: dest_ptr.into(),
461 extra: LvalueExtra::DowncastVariant(variant_idx),
465 self.assign_fields(dest, dest_ty, operands)
/// Evaluates each operand and writes it into the corresponding field of
/// `dest`. Fast paths: ZST destinations are a no-op; destinations with a
/// primval representation take a single-operand direct write.
468 pub fn assign_fields(
// NOTE(review): the `dest` / `dest_ty` parameter lines are elided here.
472 operands: &[mir::Operand<'tcx>],
473 ) -> EvalResult<'tcx> {
474 if self.type_size(dest_ty)? == Some(0) {
475 // zst assigning is a nop
478 if self.ty_to_primval_kind(dest_ty).is_ok() {
479 assert_eq!(operands.len(), 1);
480 let value = self.eval_operand(&operands[0])?;
481 let value_ty = self.operand_ty(&operands[0]);
482 return self.write_value(value, dest, value_ty);
484 for (field_index, operand) in operands.iter().enumerate() {
485 let value = self.eval_operand(operand)?;
486 let value_ty = self.operand_ty(operand);
487 let field_dest = self.lvalue_field(dest, field_index, dest_ty, value_ty)?;
488 self.write_value(value, field_dest, value_ty)?;
493 /// Evaluate an assignment statement.
495 /// There is no separate `eval_rvalue` function. Instead, the code for handling each rvalue
496 /// type writes its results directly into the memory specified by the lvalue.
// NOTE(review): this excerpt elides many lines of this function (match
// headers, closing braces, some arms); comments below describe only what
// is visible.
497 pub(super) fn eval_rvalue_into_lvalue(
499 rvalue: &mir::Rvalue<'tcx>,
500 lvalue: &mir::Lvalue<'tcx>,
501 ) -> EvalResult<'tcx> {
502 let dest = self.eval_lvalue(lvalue)?;
503 let dest_ty = self.lvalue_ty(lvalue);
504 let dest_layout = self.type_layout(dest_ty)?;
506 use rustc::mir::Rvalue::*;
// Plain move/copy: evaluate the operand and store it.
508 Use(ref operand) => {
509 let value = self.eval_operand(operand)?;
510 self.write_value(value, dest, dest_ty)?;
513 BinaryOp(bin_op, ref left, ref right) => {
514 if self.intrinsic_overflowing(bin_op, left, right, dest, dest_ty)? {
515 // There was an overflow in an unchecked binop. Right now, we consider this an error and bail out.
516 // The rationale is that the reason rustc emits unchecked binops in release mode (vs. the checked binops
517 // it emits in debug mode) is performance, but it doesn't cost us any performance in miri.
518 // If, however, the compiler ever starts transforming unchecked intrinsics into unchecked binops,
519 // we have to go back to just ignoring the overflow here.
520 return Err(EvalError::OverflowingMath);
// Checked binop: writes (result, overflow-flag) pair into dest.
524 CheckedBinaryOp(bin_op, ref left, ref right) => {
525 self.intrinsic_with_overflow(bin_op, left, right, dest, dest_ty)?;
528 UnaryOp(un_op, ref operand) => {
529 let val = self.eval_operand_to_primval(operand)?;
530 let kind = self.ty_to_primval_kind(dest_ty)?;
531 self.write_primval(dest, operator::unary_op(un_op, val, kind)?, dest_ty)?;
534 // Skip everything for zsts
535 Aggregate(..) if self.type_size(dest_ty)? == Some(0) => {}
// Aggregates dispatch on the destination's layout variant.
537 Aggregate(ref kind, ref operands) => {
538 self.inc_step_counter_and_check_limit(operands.len() as u64)?;
539 use rustc::ty::layout::Layout::*;
541 Univariant { .. } | Array { .. } => {
542 self.assign_fields(dest, dest_ty, operands)?;
// General enum layout: write discriminant + variant fields.
545 General { discr, ref variants, .. } => {
546 if let mir::AggregateKind::Adt(adt_def, variant, _, _) = **kind {
547 let discr_val = adt_def.discriminants(self.tcx)
// NOTE(review): an `.nth(variant)`-style selector line is elided here.
549 .expect("broken mir: Adt variant id invalid")
550 .to_u128_unchecked();
551 let discr_size = discr.size().bytes();
553 self.assign_discr_and_fields(
556 variants[variant].offsets[0].bytes(),
563 bug!("tried to assign {:?} to Layout::General", kind);
// Option-like enum where the pointer itself encodes the discriminant.
567 RawNullablePointer { nndiscr, .. } => {
568 if let mir::AggregateKind::Adt(_, variant, _, _) = **kind {
569 if nndiscr == variant as u64 {
570 assert_eq!(operands.len(), 1);
571 let operand = &operands[0];
572 let value = self.eval_operand(operand)?;
573 let value_ty = self.operand_ty(operand);
574 self.write_value(value, dest, value_ty)?;
// Null variant: any operand must be a ZST; store null.
576 if let Some(operand) = operands.get(0) {
577 assert_eq!(operands.len(), 1);
578 let operand_ty = self.operand_ty(operand);
579 assert_eq!(self.type_size(operand_ty)?, Some(0));
581 self.write_null(dest, dest_ty)?;
584 bug!("tried to assign {:?} to Layout::RawNullablePointer", kind);
// Option-like enum with the niche buried inside a struct field.
588 StructWrappedNullablePointer { nndiscr, ref discrfield, .. } => {
589 if let mir::AggregateKind::Adt(_, variant, _, _) = **kind {
590 if nndiscr == variant as u64 {
591 self.assign_fields(dest, dest_ty, operands)?;
593 for operand in operands {
594 let operand_ty = self.operand_ty(operand);
595 assert_eq!(self.type_size(operand_ty)?, Some(0));
597 let (offset, TyAndPacked { ty, packed: _}) = self.nonnull_offset_and_ty(dest_ty, nndiscr, discrfield)?;
598 // TODO: The packed flag is ignored
// Zero out the discriminant field to mark the "null" variant.
601 let dest = self.force_allocation(dest)?.to_ptr()?;
603 let dest = dest.offset(offset.bytes(), &self)?;
604 let dest_size = self.type_size(ty)?
605 .expect("bad StructWrappedNullablePointer discrfield");
606 self.memory.write_int(dest, 0, dest_size)?;
609 bug!("tried to assign {:?} to Layout::RawNullablePointer", kind);
// NOTE(review): the `CEnum { .. } =>` arm header is elided here.
614 assert_eq!(operands.len(), 0);
615 if let mir::AggregateKind::Adt(adt_def, variant, _, _) = **kind {
616 let n = adt_def.discriminants(self.tcx)
618 .expect("broken mir: Adt variant index invalid")
619 .to_u128_unchecked();
620 self.write_primval(dest, PrimVal::Bytes(n), dest_ty)?;
622 bug!("tried to assign {:?} to Layout::CEnum", kind);
626 Vector { count, .. } => {
627 debug_assert_eq!(count, operands.len() as u64);
628 self.assign_fields(dest, dest_ty, operands)?;
631 UntaggedUnion { .. } => {
632 assert_eq!(operands.len(), 1);
633 let operand = &operands[0];
634 let value = self.eval_operand(operand)?;
635 let value_ty = self.operand_ty(operand);
636 self.write_value(value, dest, value_ty)?;
640 return Err(EvalError::Unimplemented(format!(
641 "can't handle destination layout {:?} when assigning {:?}",
// `[elem; n]` repeat expression: evaluate once, write n times.
649 Repeat(ref operand, _) => {
650 let (elem_ty, length) = match dest_ty.sty {
651 ty::TyArray(elem_ty, n) => (elem_ty, n as u64),
652 _ => bug!("tried to assign array-repeat to non-array type {:?}", dest_ty),
654 self.inc_step_counter_and_check_limit(length)?;
655 let elem_size = self.type_size(elem_ty)?
656 .expect("repeat element type must be sized");
657 let value = self.eval_operand(operand)?;
660 let dest = Pointer::from(self.force_allocation(dest)?.to_ptr()?);
// NOTE(review): the `for i in 0..length {` line is elided here.
663 let elem_dest = dest.offset(i * elem_size, &self)?;
664 self.write_value_to_ptr(value, elem_dest, elem_ty)?;
// NOTE(review): the `Len(ref lvalue) =>` arm header is elided here.
669 // FIXME(CTFE): don't allow computing the length of arrays in const eval
670 let src = self.eval_lvalue(lvalue)?;
671 let ty = self.lvalue_ty(lvalue);
672 let (_, len) = src.elem_ty_and_len(ty);
673 self.write_primval(dest, PrimVal::from_u128(len as u128), dest_ty)?;
676 Ref(_, _, ref lvalue) => {
677 let src = self.eval_lvalue(lvalue)?;
678 // We ignore the alignment of the lvalue here -- special handling for packed structs ends
679 // at the `&` operator.
680 let (ptr, extra, _aligned) = self.force_allocation(src)?.to_ptr_extra_aligned();
// Rebuild the pointer value, carrying slice length / vtable when fat.
682 let val = match extra {
683 LvalueExtra::None => ptr.to_value(),
684 LvalueExtra::Length(len) => ptr.to_value_with_len(len),
685 LvalueExtra::Vtable(vtable) => ptr.to_value_with_vtable(vtable),
686 LvalueExtra::DowncastVariant(..) =>
687 bug!("attempted to take a reference to an enum downcast lvalue"),
689 self.write_value(val, dest, dest_ty)?;
692 NullaryOp(mir::NullOp::Box, ty) => {
693 // FIXME(CTFE): don't allow heap allocations in const eval
694 // FIXME: call the `exchange_malloc` lang item if available
695 let size = self.type_size(ty)?.expect("box only works with sized types");
// NOTE(review): lines around the zero-size special case are elided;
// the align-write below presumably belongs to that branch — confirm.
697 let align = self.type_align(ty)?;
698 self.write_primval(dest, PrimVal::Bytes(align.into()), dest_ty)?;
700 let align = self.type_align(ty)?;
701 let ptr = self.memory.allocate(size, align, MemoryKind::Rust)?;
702 self.write_primval(dest, PrimVal::Ptr(ptr), dest_ty)?;
706 NullaryOp(mir::NullOp::SizeOf, ty) => {
707 let size = self.type_size(ty)?.expect("SizeOf nullary MIR operator called for unsized type");
708 self.write_primval(dest, PrimVal::from_u128(size as u128), dest_ty)?;
711 Cast(kind, ref operand, cast_ty) => {
712 debug_assert_eq!(self.monomorphize(cast_ty, self.substs()), dest_ty);
713 use rustc::mir::CastKind::*;
// NOTE(review): the `match kind {` / `Unsize =>` arm headers are elided.
716 let src = self.eval_operand(operand)?;
717 let src_ty = self.operand_ty(operand);
718 self.unsize_into(src, src_ty, dest, dest_ty)?;
// Misc casts: fat-pointer-aware pointer casts and primval casts.
722 let src = self.eval_operand(operand)?;
723 let src_ty = self.operand_ty(operand);
724 if self.type_is_fat_ptr(src_ty) {
725 match (src, self.type_is_fat_ptr(dest_ty)) {
726 (Value::ByRef{..}, _) |
727 (Value::ByValPair(..), true) => {
728 self.write_value(src, dest, dest_ty)?;
// Fat -> thin pointer cast: keep the data pointer, drop the extra.
730 (Value::ByValPair(data, _), false) => {
731 self.write_value(Value::ByVal(data), dest, dest_ty)?;
733 (Value::ByVal(_), _) => bug!("expected fat ptr"),
736 let src_val = self.value_to_primval(src, src_ty)?;
737 let dest_val = self.cast_primval(src_val, src_ty, dest_ty)?;
738 self.write_value(Value::ByVal(dest_val), dest, dest_ty)?;
742 ReifyFnPointer => match self.operand_ty(operand).sty {
743 ty::TyFnDef(def_id, substs) => {
744 let instance = resolve(self.tcx, def_id, substs);
745 let fn_ptr = self.memory.create_fn_alloc(instance);
746 self.write_value(Value::ByVal(PrimVal::Ptr(fn_ptr)), dest, dest_ty)?;
748 ref other => bug!("reify fn pointer on {:?}", other),
751 UnsafeFnPointer => match dest_ty.sty {
// NOTE(review): the `ty::TyFnPtr(_) =>` arm header is elided here.
753 let src = self.eval_operand(operand)?;
754 self.write_value(src, dest, dest_ty)?;
756 ref other => bug!("fn to unsafe fn cast on {:?}", other),
759 ClosureFnPointer => match self.operand_ty(operand).sty {
760 ty::TyClosure(def_id, substs) => {
// Non-capturing closure reified as a FnOnce shim function pointer.
761 let instance = resolve_closure(self.tcx, def_id, substs, ty::ClosureKind::FnOnce);
762 let fn_ptr = self.memory.create_fn_alloc(instance);
763 self.write_value(Value::ByVal(PrimVal::Ptr(fn_ptr)), dest, dest_ty)?;
765 ref other => bug!("closure fn pointer on {:?}", other),
770 Discriminant(ref lvalue) => {
771 let lval = self.eval_lvalue(lvalue)?;
772 let ty = self.lvalue_ty(lvalue);
773 let ptr = self.force_allocation(lval)?.to_ptr()?;
774 let discr_val = self.read_discriminant_value(ptr, ty)?;
775 if let ty::TyAdt(adt_def, _) = ty.sty {
// Reject discriminants not belonging to any declared variant.
776 if adt_def.discriminants(self.tcx).all(|v| discr_val != v.to_u128_unchecked()) {
777 return Err(EvalError::InvalidDiscriminant);
// NOTE(review): the `} else {` framing around this bug! is elided here.
780 bug!("rustc only generates Rvalue::Discriminant for enums");
782 self.write_primval(dest, PrimVal::Bytes(discr_val), dest_ty)?;
786 if log_enabled!(::log::LogLevel::Trace) {
787 self.dump_local(dest);
/// True when a pointer to this pointee needs metadata (length/vtable):
/// raw pointers and references to unsized types, and `Box` of unsized.
793 pub(super) fn type_is_fat_ptr(&self, ty: Ty<'tcx>) -> bool {
795 ty::TyRawPtr(ref tam) |
796 ty::TyRef(_, ref tam) => !self.type_is_sized(tam.ty),
797 ty::TyAdt(def, _) if def.is_box() => !self.type_is_sized(ty.boxed_ty()),
/// For a `StructWrappedNullablePointer` enum, resolves the `discrfield`
/// path to the offset and type of the niche (non-null) field inside the
/// non-null variant. NOTE(review): the `ty` / `nndiscr` / `discrfield`
/// parameter lines are elided in this excerpt — names inferred from use.
802 pub(super) fn nonnull_offset_and_ty(
807 ) -> EvalResult<'tcx, (Size, TyAndPacked<'tcx>)> {
808 // Skip the constant 0 at the start meant for LLVM GEP and the outer non-null variant
809 let path = discrfield.iter().skip(2).map(|&i| i as usize);
811 // Handle the field index for the outer non-null variant.
812 let (inner_offset, inner_ty) = match ty.sty {
813 ty::TyAdt(adt_def, substs) => {
814 let variant = &adt_def.variants[nndiscr as usize];
815 let index = discrfield[1];
816 let field = &variant.fields[index as usize];
817 (self.get_field_offset(ty, index as usize)?, field.ty(self.tcx, substs))
819 _ => bug!("non-enum for StructWrappedNullablePointer: {}", ty),
// Walk the remaining path segments through nested field types.
822 self.field_path_offset_and_ty(inner_offset, inner_ty, path)
/// Accumulates field offsets along `path`, descending through field
/// types, and ORs together the `packed` flags encountered along the way.
/// NOTE(review): the `offset` / `ty` parameter lines are elided in this
/// excerpt — names inferred from use.
825 fn field_path_offset_and_ty<I: Iterator<Item = usize>>(
830 ) -> EvalResult<'tcx, (Size, TyAndPacked<'tcx>)> {
831 // Skip the initial 0 intended for LLVM GEP.
832 let mut packed = false;
833 for field_index in path {
834 let field_offset = self.get_field_offset(ty, field_index)?;
835 trace!("field_path_offset_and_ty: {}, {}, {:?}, {:?}", field_index, ty, field_offset, offset);
836 let field_ty = self.get_field_ty(ty, field_index)?;
// NOTE(review): the line rebinding `ty = field_ty.ty` appears elided here.
838 packed = packed || field_ty.packed;
839 offset = offset.checked_add(field_offset, &self.tcx.data_layout).unwrap();
842 Ok((offset, TyAndPacked { ty, packed }))
/// Component types of a fat pointer to `pointee_ty`: field 0 is the data
/// pointer (modelled as `*const u8`), field 1 is the metadata — `usize`
/// for slices; for trait objects the arm is visible but its result line
/// is elided in this excerpt.
844 fn get_fat_field(&self, pointee_ty: Ty<'tcx>, field_index: usize) -> EvalResult<'tcx, Ty<'tcx>> {
845 match (field_index, &self.tcx.struct_tail(pointee_ty).sty) {
847 (1, &ty::TySlice(_)) => Ok(self.tcx.types.usize),
848 (1, &ty::TyDynamic(..)) |
849 (0, _) => Ok(self.tcx.mk_imm_ptr(self.tcx.types.u8)),
850 _ => bug!("invalid fat pointee type: {}", pointee_ty),
854 /// Returns the field type and whether the field is packed
// Dispatches on the type: boxes and pointers use fat-pointer fields,
// enums are only supported for the two nullable-pointer layouts, structs
// via the Univariant layout, plus tuples and arrays.
855 pub fn get_field_ty(&self, ty: Ty<'tcx>, field_index: usize) -> EvalResult<'tcx, TyAndPacked<'tcx>> {
// NOTE(review): the `match ty.sty {` line is elided in this excerpt.
857 ty::TyAdt(adt_def, _) if adt_def.is_box() =>
858 Ok(TyAndPacked { ty: self.get_fat_field(ty.boxed_ty(), field_index)?, packed: false }),
859 ty::TyAdt(adt_def, substs) if adt_def.is_enum() => {
860 use rustc::ty::layout::Layout::*;
861 match *self.type_layout(ty)? {
862 RawNullablePointer { nndiscr, .. } =>
863 Ok(TyAndPacked { ty: adt_def.variants[nndiscr as usize].fields[field_index].ty(self.tcx, substs), packed: false }),
864 StructWrappedNullablePointer { nndiscr, ref nonnull, .. } => {
865 let ty = adt_def.variants[nndiscr as usize].fields[field_index].ty(self.tcx, substs);
866 Ok(TyAndPacked { ty, packed: nonnull.packed })
868 _ => Err(EvalError::Unimplemented(format!("get_field_ty can't handle enum type: {:?}, {:?}", ty, ty.sty))),
871 ty::TyAdt(adt_def, substs) => {
872 let variant_def = adt_def.struct_variant();
873 use rustc::ty::layout::Layout::*;
874 match *self.type_layout(ty)? {
875 Univariant { ref variant, .. } =>
876 Ok(TyAndPacked { ty: variant_def.fields[field_index].ty(self.tcx, substs), packed: variant.packed }),
877 _ => Err(EvalError::Unimplemented(format!("get_field_ty can't handle struct type: {:?}, {:?}", ty, ty.sty))),
881 ty::TyTuple(fields, _) => Ok(TyAndPacked { ty: fields[field_index], packed: false }),
883 ty::TyRef(_, ref tam) |
884 ty::TyRawPtr(ref tam) => Ok(TyAndPacked { ty: self.get_fat_field(tam.ty, field_index)?, packed: false }),
// Array elements all share the inner type; arrays are never packed.
886 ty::TyArray(ref inner, _) => Ok(TyAndPacked { ty: inner, packed: false }),
888 _ => Err(EvalError::Unimplemented(format!("can't handle type: {:?}, {:?}", ty, ty.sty))),
/// Byte offset of field `field_index` within `ty`, derived from its
/// layout. Supports Univariant structs, fat pointers (pointer-size
/// strides), and the non-null variant of StructWrappedNullablePointer.
892 fn get_field_offset(&self, ty: Ty<'tcx>, field_index: usize) -> EvalResult<'tcx, Size> {
893 // Also see lvalue_field in lvalue.rs, which handles more cases but needs an actual value at the given type
894 let layout = self.type_layout(ty)?;
896 use rustc::ty::layout::Layout::*;
// NOTE(review): the `match *layout {` line is elided in this excerpt.
898 Univariant { ref variant, .. } => {
899 Ok(variant.offsets[field_index])
901 FatPointer { .. } => {
902 let bytes = field_index as u64 * self.memory.pointer_size();
903 Ok(Size::from_bytes(bytes))
905 StructWrappedNullablePointer { ref nonnull, .. } => {
906 Ok(nonnull.offsets[field_index])
909 let msg = format!("can't handle type: {:?}, with layout: {:?}", ty, layout);
910 Err(EvalError::Unimplemented(msg))
/// Number of fields of `ty` according to its layout: struct field count,
/// 2 for fat pointers, element count for vectors/arrays, 0 for scalars;
/// unimplemented otherwise.
915 pub fn get_field_count(&self, ty: Ty<'tcx>) -> EvalResult<'tcx, u64> {
916 let layout = self.type_layout(ty)?;
918 use rustc::ty::layout::Layout::*;
// NOTE(review): the `match *layout {` line is elided in this excerpt.
920 Univariant { ref variant, .. } => Ok(variant.offsets.len() as u64),
921 FatPointer { .. } => Ok(2),
922 StructWrappedNullablePointer { ref nonnull, .. } => Ok(nonnull.offsets.len() as u64),
923 Vector { count , .. } |
924 Array { count, .. } => Ok(count),
925 Scalar { .. } => Ok(0),
927 let msg = format!("can't handle type: {:?}, with layout: {:?}", ty, layout);
928 Err(EvalError::Unimplemented(msg))
/// Offsets `ptr` by `offset` elements of `pointee_ty` with wrapping
/// arithmetic and no bounds checks (the `ptr::wrapping_offset` semantics,
/// unlike the checked `pointer_offset` below).
933 pub(super) fn wrapping_pointer_offset(&self, ptr: Pointer, pointee_ty: Ty<'tcx>, offset: i64) -> EvalResult<'tcx, Pointer> {
934 // FIXME: assuming here that type size is < i64::max_value()
935 let pointee_size = self.type_size(pointee_ty)?.expect("cannot offset a pointer to an unsized type") as i64;
936 let offset = offset.overflowing_mul(pointee_size).0;
937 ptr.wrapping_signed_offset(offset, self)
/// Checked pointer offset (the `ptr::offset` semantics): errors if the
/// result leaves the pointer's allocation, if a NULL pointer is offset by
/// a nonzero amount (or reached), or if the byte count overflows `i64`.
940 pub fn pointer_offset(&self, ptr: Pointer, pointee_ty: Ty<'tcx>, offset: i64) -> EvalResult<'tcx, Pointer> {
941 // This function raises an error if the offset moves the pointer outside of its allocation. We consider
942 // ZSTs their own huge allocation that doesn't overlap with anything (and nothing moves in there because the size is 0).
943 // We also consider the NULL pointer its own separate allocation, and all the remaining integers pointers their own
946 if ptr.is_null()? { // NULL pointers must only be offset by 0
947 return if offset == 0 { Ok(ptr) } else { Err(EvalError::InvalidNullPointerUsage) };
949 // FIXME: assuming here that type size is < i64::max_value()
950 let pointee_size = self.type_size(pointee_ty)?.expect("cannot offset a pointer to an unsized type") as i64;
951 return if let Some(offset) = offset.checked_mul(pointee_size) {
952 let ptr = ptr.signed_offset(offset, self)?;
953 // Do not do bounds-checking for integers; they can never alias a normal pointer anyway.
954 if let PrimVal::Ptr(ptr) = ptr.into_inner_primval() {
955 self.memory.check_bounds(ptr, false)?;
956 } else if ptr.is_null()? {
957 // We moved *to* a NULL pointer. That seems wrong, LLVM considers the NULL pointer its own small allocation. Reject this, for now.
958 return Err(EvalError::InvalidNullPointerUsage);
// NOTE(review): the `Ok(ptr)` / `} else {` framing is elided here; this
// error is the checked_mul-overflow branch.
962 Err(EvalError::OverflowingMath)
/// Evaluates an operand and reduces the result to a single `PrimVal`
/// (reading through `ByRef` values via `value_to_primval`).
966 pub(super) fn eval_operand_to_primval(&mut self, op: &mir::Operand<'tcx>) -> EvalResult<'tcx, PrimVal> {
967 let value = self.eval_operand(op)?;
968 let ty = self.operand_ty(op);
969 self.value_to_primval(value, ty)
/// Evaluates a MIR operand: `Consume` reads an lvalue; `Constant` either
/// converts an inline literal, or looks up a cached global (for item and
/// promoted literals) — panicking if the global was not precomputed.
972 pub fn eval_operand(&mut self, op: &mir::Operand<'tcx>) -> EvalResult<'tcx, Value> {
973 use rustc::mir::Operand::*;
// NOTE(review): the `match *op {` line is elided in this excerpt.
975 Consume(ref lvalue) => self.eval_and_read_lvalue(lvalue),
977 Constant(ref constant) => {
978 use rustc::mir::Literal;
979 let mir::Constant { ref literal, .. } = **constant;
980 let value = match *literal {
981 Literal::Value { ref value } => self.const_to_value(value)?,
983 Literal::Item { def_id, substs } => {
984 let instance = self.resolve_associated_const(def_id, substs);
985 let cid = GlobalId { instance, promoted: None };
986 self.globals.get(&cid).expect("static/const not cached").value
989 Literal::Promoted { index } => {
// NOTE(review): the `let cid = GlobalId {` line is elided here.
991 instance: self.frame().instance,
992 promoted: Some(index),
994 self.globals.get(&cid).expect("promoted not cached").value
/// Monomorphized type of `operand` in the current frame.
1003 pub fn operand_ty(&self, operand: &mir::Operand<'tcx>) -> Ty<'tcx> {
1004 self.monomorphize(operand.ty(self.mir(), self.tcx), self.substs())
/// Copies one typed value between memory locations (non-overlapping:
/// the final `false` argument to `Memory::copy` — TODO confirm meaning).
1007 fn copy(&mut self, src: Pointer, dest: Pointer, ty: Ty<'tcx>) -> EvalResult<'tcx> {
1008 let size = self.type_size(ty)?.expect("cannot copy from an unsized type");
1009 let align = self.type_align(ty)?;
1010 self.memory.copy(src, dest, size, align, false)?;
/// Ensures the lvalue is backed by real memory, so its address can be
/// taken. Locals currently stored `ByVal` are spilled into a fresh stack
/// allocation (and stay live as `ByRef`); globals stored `ByVal` get a
/// static allocation and the cache entry is rewritten to point at it.
/// `Lvalue::Ptr` already has an address and passes through unchanged.
1014 pub(super) fn force_allocation(
1016 lvalue: Lvalue<'tcx>,
1017 ) -> EvalResult<'tcx, Lvalue<'tcx>> {
1018 let new_lvalue = match lvalue {
1019 Lvalue::Local { frame, local } => {
1020 // -1 since we don't store the return value
1021 match self.stack[frame].locals[local.index() - 1] {
1022 None => return Err(EvalError::DeadLocal),
1023 Some(Value::ByRef { ptr, aligned }) => {
1024 Lvalue::Ptr { ptr, aligned, extra: LvalueExtra::None }
// NOTE(review): the `Some(val) =>` arm header is elided here; `val`
// below is the spilled by-value contents.
1027 let ty = self.stack[frame].mir.local_decls[local].ty;
1028 let ty = self.monomorphize(ty, self.stack[frame].instance.substs);
1029 let substs = self.stack[frame].instance.substs;
1030 let ptr = self.alloc_ptr_with_substs(ty, substs)?;
1031 self.stack[frame].locals[local.index() - 1] = Some(Value::by_ref(ptr.into())); // it stays live
1032 self.write_value_to_ptr(val, ptr.into(), ty)?;
1033 Lvalue::from_ptr(ptr)
1037 Lvalue::Ptr { .. } => lvalue,
1038 Lvalue::Global(cid) => {
1039 let global_val = self.globals.get(&cid).expect("global not cached").clone();
1040 match global_val.value {
1041 Value::ByRef { ptr, aligned } =>
1042 Lvalue::Ptr { ptr, aligned, extra: LvalueExtra::None },
// NOTE(review): the catch-all arm header for non-ByRef globals is
// elided here.
1044 let ptr = self.alloc_ptr_with_substs(global_val.ty, cid.instance.substs)?;
1045 self.memory.mark_static(ptr.alloc_id);
1046 self.write_value_to_ptr(global_val.value, ptr.into(), global_val.ty)?;
1047 // see comment on `initialized` field
1048 if global_val.initialized {
1049 self.memory.mark_static_initalized(ptr.alloc_id, global_val.mutable)?;
1051 let lval = self.globals.get_mut(&cid).expect("already checked");
// Rewrite the cached global to the new by-ref representation.
1053 value: Value::by_ref(ptr.into()),
1056 Lvalue::from_ptr(ptr)
1064 /// ensures this Value is not a ByRef
// If the value lives in memory (`ByRef`), load it back out as an immediate,
// honoring the stored alignment flag; otherwise it is returned unchanged
// (remaining arms elided in this excerpt).
1065 pub(super) fn follow_by_ref_value(&mut self, value: Value, ty: Ty<'tcx>) -> EvalResult<'tcx, Value> {
1067 Value::ByRef { ptr, aligned } => {
1068 self.read_maybe_aligned(aligned, |ectx| ectx.read_value(ptr, ty))
// Reduces a `Value` to a single `PrimVal`, validating it against `ty`.
// Fat pointers (`ByValPair`) cannot be represented as one primval and are a bug
// to pass here; `ByRef` is impossible after `follow_by_ref_value`.
1074 pub fn value_to_primval(&mut self, value: Value, ty: Ty<'tcx>) -> EvalResult<'tcx, PrimVal> {
1075 match self.follow_by_ref_value(value, ty)? {
1076 Value::ByRef{..} => bug!("follow_by_ref_value can't result in `ByRef`"),
1078 Value::ByVal(primval) => {
// Reject invalid bit patterns (e.g. bool > 1, bad char) early.
1079 self.ensure_valid_value(primval, ty)?;
1083 Value::ByValPair(..) => bug!("value_to_primval can't work with fat pointers"),
// Convenience writers (signatures partially elided in this excerpt):
// the first writes a zero primval to the destination, the second writes a
// pointer-like value, and `write_primval` wraps a primval as a `Value`.
1091 ) -> EvalResult<'tcx> {
1092 self.write_primval(dest, PrimVal::Bytes(0), dest_ty)
1100 ) -> EvalResult<'tcx> {
1101 self.write_value(val.to_value(), dest, dest_ty)
1104 pub fn write_primval(
1109 ) -> EvalResult<'tcx> {
1110 self.write_value(Value::ByVal(val), dest, dest_ty)
// Writes `src_val` into the destination lvalue (globals, raw pointers, and
// locals each take a different path). Globals must be mutable; pointer
// destinations must carry no extra (no fat-pointer metadata).
1118 ) -> EvalResult<'tcx> {
1119 //trace!("Writing {:?} to {:?} at type {:?}", src_val, dest, dest_ty);
1120 // Note that it is really important that the type here is the right one, and matches the type things are read at.
1121 // In case `src_val` is a `ByValPair`, we don't do any magic here to handle padding properly, which is only
1122 // correct if we never look at this data with the wrong type.
1125 Lvalue::Global(cid) => {
1126 let dest = self.globals.get_mut(&cid).expect("global should be cached").clone();
1127 if dest.mutable == Mutability::Immutable {
1128 return Err(EvalError::ModifiedConstantMemory);
// Closure that commits the final value back into the globals cache.
1130 let write_dest = |this: &mut Self, val| {
1131 *this.globals.get_mut(&cid).expect("already checked") = Global {
1137 self.write_value_possibly_by_val(src_val, write_dest, dest.value, dest_ty)
1140 Lvalue::Ptr { ptr, extra, aligned } => {
1141 assert_eq!(extra, LvalueExtra::None);
1142 self.write_maybe_aligned_mut(aligned,
1143 |ectx| ectx.write_value_to_ptr(src_val, ptr, dest_ty))
1146 Lvalue::Local { frame, local } => {
1147 let dest = self.stack[frame].get_local(local)?;
1148 self.write_value_possibly_by_val(
1150 |this, val| this.stack[frame].set_local(local, val),
1158 // The cases here can be a bit subtle. Read carefully!
// Core write logic shared by globals and locals: decides whether the write
// must go through memory (destination already `ByRef`), must duplicate an
// allocation (source `ByRef`, destination not), or can be a plain value move.
1159 fn write_value_possibly_by_val<F: FnOnce(&mut Self, Value) -> EvalResult<'tcx>>(
1163 old_dest_val: Value,
1165 ) -> EvalResult<'tcx> {
1166 if let Value::ByRef { ptr: dest_ptr, aligned } = old_dest_val {
1167 // If the value is already `ByRef` (that is, backed by an `Allocation`),
1168 // then we must write the new value into this allocation, because there may be
1169 // other pointers into the allocation. These other pointers are logically
1170 // pointers into the local variable, and must be able to observe the change.
1172 // Thus, it would be an error to replace the `ByRef` with a `ByVal`, unless we
1173 // knew for certain that there were no outstanding pointers to this allocation.
1174 self.write_maybe_aligned_mut(aligned,
1175 |ectx| ectx.write_value_to_ptr(src_val, dest_ptr, dest_ty))?;
1177 } else if let Value::ByRef { ptr: src_ptr, aligned } = src_val {
1178 // If the value is not `ByRef`, then we know there are no pointers to it
1179 // and we can simply overwrite the `Value` in the locals array directly.
1181 // In this specific case, where the source value is `ByRef`, we must duplicate
1182 // the allocation, because this is a by-value operation. It would be incorrect
1183 // if they referred to the same allocation, since then a change to one would
1184 // implicitly change the other.
1186 // It is a valid optimization to attempt reading a primitive value out of the
1187 // source and write that into the destination without making an allocation, so
1189 self.read_maybe_aligned_mut(aligned, |ectx| {
1190 if let Ok(Some(src_val)) = ectx.try_read_value(src_ptr, dest_ty) {
1191 write_dest(ectx, src_val)?;
// Not a primitive: allocate a fresh copy so the two values don't alias.
1193 let dest_ptr = ectx.alloc_ptr(dest_ty)?.into();
1194 ectx.copy(src_ptr, dest_ptr, dest_ty)?;
1195 write_dest(ectx, Value::by_ref(dest_ptr))?;
1201 // Finally, we have the simple case where neither source nor destination are
1202 // `ByRef`. We may simply copy the source value over the destination.
1203 write_dest(self, src_val)?;
// Writes a `Value` into raw memory at `dest`:
// `ByRef` becomes a memory-to-memory copy, `ByVal` a sized primval store,
// and `ByValPair` goes through the field-aware pair writer.
1208 pub(super) fn write_value_to_ptr(
1213 ) -> EvalResult<'tcx> {
1215 Value::ByRef { ptr, aligned } => {
1216 self.read_maybe_aligned_mut(aligned, |ectx| ectx.copy(ptr, dest, dest_ty))
1218 Value::ByVal(primval) => {
1219 let size = self.type_size(dest_ty)?.expect("dest type must be sized");
1220 self.memory.write_primval(dest, primval, size)
1222 Value::ByValPair(a, b) => self.write_pair_to_ptr(a, b, dest.to_ptr()?, dest_ty),
// Writes a two-primval pair (e.g. a fat pointer) into memory at `ptr`,
// peeling single-field wrappers until a genuine two-field layout is reached,
// and tracking packedness so unaligned stores are done correctly.
1226 pub(super) fn write_pair_to_ptr(
1232 ) -> EvalResult<'tcx> {
1233 let mut packed = false;
// Descend through newtype-like wrappers (exactly one field) to the real pair.
1234 while self.get_field_count(ty)? == 1 {
1235 let field = self.get_field_ty(ty, 0)?;
1237 packed = packed || field.packed;
1239 assert_eq!(self.get_field_count(ty)?, 2);
1240 let field_0 = self.get_field_offset(ty, 0)?;
1241 let field_1 = self.get_field_offset(ty, 1)?;
1242 let field_0_ty = self.get_field_ty(ty, 0)?;
1243 let field_1_ty = self.get_field_ty(ty, 1)?;
1244 assert_eq!(field_0_ty.packed, field_1_ty.packed, "the two fields must agree on being packed");
1245 packed = packed || field_0_ty.packed;
1246 let field_0_size = self.type_size(field_0_ty.ty)?.expect("pair element type must be sized");
1247 let field_1_size = self.type_size(field_1_ty.ty)?.expect("pair element type must be sized");
1248 let field_0_ptr = ptr.offset(field_0.bytes(), &self)?.into();
1249 let field_1_ptr = ptr.offset(field_1.bytes(), &self)?.into();
// `!packed` == "assume aligned"; packed layouts force unaligned writes.
1250 self.write_maybe_aligned_mut(!packed,
1251 |ectx| ectx.memory.write_primval(field_0_ptr, a, field_0_size))?;
1252 self.write_maybe_aligned_mut(!packed,
1253 |ectx| ectx.memory.write_primval(field_1_ptr, b, field_1_size))?;
// Maps a type to the `PrimValKind` used to interpret its bits, or
// `TypeNotPrimitive` if it has no single-primval representation.
// Covers bool/char/ints/floats/fn-pointers/thin references, plus ADTs whose
// layout is a C-like enum, nullable-pointer optimization, or a univariant
// wrapper around exactly one field.
1257 pub fn ty_to_primval_kind(&self, ty: Ty<'tcx>) -> EvalResult<'tcx, PrimValKind> {
1258 use syntax::ast::FloatTy;
1260 let kind = match ty.sty {
1261 ty::TyBool => PrimValKind::Bool,
1262 ty::TyChar => PrimValKind::Char,
1264 ty::TyInt(int_ty) => {
1265 use syntax::ast::IntTy::*;
1266 let size = match int_ty {
// `Is` (isize) is target-dependent.
1272 Is => self.memory.pointer_size(),
1274 PrimValKind::from_int_size(size)
1277 ty::TyUint(uint_ty) => {
1278 use syntax::ast::UintTy::*;
1279 let size = match uint_ty {
1285 Us => self.memory.pointer_size(),
1287 PrimValKind::from_uint_size(size)
1290 ty::TyFloat(FloatTy::F32) => PrimValKind::F32,
1291 ty::TyFloat(FloatTy::F64) => PrimValKind::F64,
1293 ty::TyFnPtr(_) => PrimValKind::FnPtr,
// Only *thin* references/raw pointers are a single primval; fat pointers are not.
1295 ty::TyRef(_, ref tam) |
1296 ty::TyRawPtr(ref tam) if self.type_is_sized(tam.ty) => PrimValKind::Ptr,
1298 ty::TyAdt(def, _) if def.is_box() => PrimValKind::Ptr,
1300 ty::TyAdt(def, substs) => {
1301 use rustc::ty::layout::Layout::*;
1302 match *self.type_layout(ty)? {
1303 CEnum { discr, signed, .. } => {
1304 let size = discr.size().bytes();
1306 PrimValKind::from_int_size(size)
1308 PrimValKind::from_uint_size(size)
1312 RawNullablePointer { value, .. } => {
1313 use rustc::ty::layout::Primitive::*;
1315 // TODO(solson): Does signedness matter here? What should the sign be?
1316 Int(int) => PrimValKind::from_uint_size(int.size().bytes()),
1317 F32 => PrimValKind::F32,
1318 F64 => PrimValKind::F64,
1319 Pointer => PrimValKind::Ptr,
1323 // represent single field structs as their single field
1324 Univariant { .. } => {
1325 // enums with just one variant are no different, but `.struct_variant()` doesn't work for enums
1326 let variant = &def.variants[0];
1327 // FIXME: also allow structs with only a single non zst field
1328 if variant.fields.len() == 1 {
1329 return self.ty_to_primval_kind(variant.fields[0].ty(self.tcx, substs));
1331 return Err(EvalError::TypeNotPrimitive(ty));
1335 _ => return Err(EvalError::TypeNotPrimitive(ty)),
1339 _ => return Err(EvalError::TypeNotPrimitive(ty)),
// Sanity-checks a primval against its claimed type: bools must be 0/1,
// chars must be valid Unicode scalar values (other arms elided in this view).
1345 fn ensure_valid_value(&self, val: PrimVal, ty: Ty<'tcx>) -> EvalResult<'tcx> {
1347 ty::TyBool if val.to_bytes()? > 1 => Err(EvalError::InvalidBool),
1349 ty::TyChar if ::std::char::from_u32(val.to_bytes()? as u32).is_none()
1350 => Err(EvalError::InvalidChar(val.to_bytes()? as u32 as u128)),
// Like `try_read_value`, but the caller asserts the type *is* primitive-readable;
// a `None` result here is an interpreter bug.
1356 pub(super) fn read_value(&self, ptr: Pointer, ty: Ty<'tcx>) -> EvalResult<'tcx, Value> {
1357 if let Some(val) = self.try_read_value(ptr, ty)? {
1360 bug!("primitive read failed for type: {:?}", ty);
// Reads a (possibly fat) pointer to `pointee_ty` from memory. For unsized
// pointees, the metadata word directly after the data pointer is a vtable
// (trait objects) or a length (slices/str).
1364 pub(crate) fn read_ptr(&self, ptr: MemoryPointer, pointee_ty: Ty<'tcx>) -> EvalResult<'tcx, Value> {
1365 let p = self.memory.read_ptr(ptr)?;
1366 if self.type_is_sized(pointee_ty) {
1369 trace!("reading fat pointer extra of type {}", pointee_ty);
1370 let extra = ptr.offset(self.memory.pointer_size(), self)?;
1371 match self.tcx.struct_tail(pointee_ty).sty {
1372 ty::TyDynamic(..) => Ok(p.to_value_with_vtable(self.memory.read_ptr(extra)?.to_ptr()?)),
1374 ty::TyStr => Ok(p.to_value_with_len(self.memory.read_usize(extra)?)),
1376 _ => bug!("unsized primval ptr read from {:?}", pointee_ty),
// Attempts to read a primitive `Value` of type `ty` from memory at `ptr`.
// Returns `Ok(None)` when the type has no primitive representation, so the
// caller falls back to a `ByRef` value.
1380 fn try_read_value(&self, ptr: Pointer, ty: Ty<'tcx>) -> EvalResult<'tcx, Option<Value>> {
1381 use syntax::ast::FloatTy;
1383 let val = match ty.sty {
1384 ty::TyBool => PrimVal::from_bool(self.memory.read_bool(ptr.to_ptr()?)?),
1386 let c = self.memory.read_uint(ptr.to_ptr()?, 4)? as u32;
1387 match ::std::char::from_u32(c) {
1388 Some(ch) => PrimVal::from_char(ch),
1389 None => return Err(EvalError::InvalidChar(c as u128)),
1393 ty::TyInt(int_ty) => {
1394 use syntax::ast::IntTy::*;
1395 let size = match int_ty {
1401 Is => self.memory.pointer_size(),
1403 // if we transmute a ptr to an isize, reading it back into a primval shouldn't panic
1404 // Due to read_ptr ignoring the sign, we need to jump around some hoops
1405 match self.memory.read_int(ptr.to_ptr()?, size) {
1406 Err(EvalError::ReadPointerAsBytes) if size == self.memory.pointer_size() =>
1407 // Reading as an int failed because we are seeing ptr bytes *and* we are actually reading at ptr size.
1408 // Let's try again, reading a ptr this time.
1409 self.memory.read_ptr(ptr.to_ptr()?)?.into_inner_primval(),
1410 other => PrimVal::from_i128(other?),
1414 ty::TyUint(uint_ty) => {
1415 use syntax::ast::UintTy::*;
1416 let size = match uint_ty {
1422 Us => self.memory.pointer_size(),
1424 // if we transmute a ptr to an usize, reading it back into a primval shouldn't panic
1425 // for consistency's sake, we use the same code as above
1426 match self.memory.read_uint(ptr.to_ptr()?, size) {
1427 Err(EvalError::ReadPointerAsBytes) if size == self.memory.pointer_size() => self.memory.read_ptr(ptr.to_ptr()?)?.into_inner_primval(),
1428 other => PrimVal::from_u128(other?),
1432 ty::TyFloat(FloatTy::F32) => PrimVal::from_f32(self.memory.read_f32(ptr.to_ptr()?)?),
1433 ty::TyFloat(FloatTy::F64) => PrimVal::from_f64(self.memory.read_f64(ptr.to_ptr()?)?),
1435 ty::TyFnPtr(_) => self.memory.read_ptr(ptr.to_ptr()?)?.into_inner_primval(),
// References/raw pointers may be fat; delegate to the fat-pointer-aware reader.
1436 ty::TyRef(_, ref tam) |
1437 ty::TyRawPtr(ref tam) => return self.read_ptr(ptr.to_ptr()?, tam.ty).map(Some),
1439 ty::TyAdt(def, _) => {
1441 return self.read_ptr(ptr.to_ptr()?, ty.boxed_ty()).map(Some);
1443 use rustc::ty::layout::Layout::*;
// Only C-like enums among the remaining ADTs are primitive-readable.
1444 if let CEnum { discr, signed, .. } = *self.type_layout(ty)? {
1445 let size = discr.size().bytes();
1447 PrimVal::from_i128(self.memory.read_int(ptr.to_ptr()?, size)?)
1449 PrimVal::from_u128(self.memory.read_uint(ptr.to_ptr()?, size)?)
1456 _ => return Ok(None),
1459 Ok(Some(Value::ByVal(val)))
// Accessors for the innermost (current) call frame and its MIR/substs.
// All of them panic if the stack is empty — callers must only use them while
// executing inside a frame.
1462 pub fn frame(&self) -> &Frame<'tcx> {
1463 self.stack.last().expect("no call frames exist")
1466 pub(super) fn frame_mut(&mut self) -> &mut Frame<'tcx> {
1467 self.stack.last_mut().expect("no call frames exist")
1470 pub(super) fn mir(&self) -> &'tcx mir::Mir<'tcx> {
1474 pub(super) fn substs(&self) -> &'tcx Substs<'tcx> {
1475 self.frame().instance.substs
// Performs a pointer-level unsizing coercion (signature partially elided in
// this excerpt): array→slice attaches a length, dyn→dyn is a no-op (marker
// traits only), and sized→dyn attaches a freshly built vtable.
1486 ) -> EvalResult<'tcx> {
1487 // A<Struct> -> A<Trait> conversion
1488 let (src_pointee_ty, dest_pointee_ty) = self.tcx.struct_lockstep_tails(sty, dty);
1490 match (&src_pointee_ty.sty, &dest_pointee_ty.sty) {
1491 (&ty::TyArray(_, length), &ty::TySlice(_)) => {
1492 let ptr = src.into_ptr(&self.memory)?;
1493 // u64 cast is from usize to u64, which is always good
1494 self.write_value(ptr.to_value_with_len(length as u64), dest, dest_ty)
1496 (&ty::TyDynamic(..), &ty::TyDynamic(..)) => {
1497 // For now, upcasts are limited to changes in marker
1498 // traits, and hence never actually require an actual
1499 // change to the vtable.
1500 self.write_value(src, dest, dest_ty)
1502 (_, &ty::TyDynamic(ref data, _)) => {
1503 let trait_ref = data.principal().unwrap().with_self_ty(self.tcx, src_pointee_ty);
1504 let trait_ref = self.tcx.erase_regions(&trait_ref);
1505 let vtable = self.get_vtable(src_pointee_ty, trait_ref)?;
1506 let ptr = src.into_ptr(&self.memory)?;
1507 self.write_value(ptr.to_value_with_vtable(vtable), dest, dest_ty)
1510 _ => bug!("invalid unsizing {:?} -> {:?}", src_ty, dest_ty),
// Top-level unsizing coercion. Reference/pointer pairs delegate to
// `unsize_into_ptr`; ADT-to-ADT handles `Box`, primitive newtypes, and the
// general case of smart pointers like `Arc<T> -> Arc<Trait>`, where each
// field is copied or recursively unsized.
1520 ) -> EvalResult<'tcx> {
1521 match (&src_ty.sty, &dest_ty.sty) {
1522 (&ty::TyRef(_, ref s), &ty::TyRef(_, ref d)) |
1523 (&ty::TyRef(_, ref s), &ty::TyRawPtr(ref d)) |
1524 (&ty::TyRawPtr(ref s), &ty::TyRawPtr(ref d)) => self.unsize_into_ptr(src, src_ty, dest, dest_ty, s.ty, d.ty),
1525 (&ty::TyAdt(def_a, substs_a), &ty::TyAdt(def_b, substs_b)) => {
// Box unsizing is only valid Box -> Box; mixed cases are a compiler bug.
1526 if def_a.is_box() || def_b.is_box() {
1527 if !def_a.is_box() || !def_b.is_box() {
1528 panic!("invalid unsizing between {:?} -> {:?}", src_ty, dest_ty);
1530 return self.unsize_into_ptr(src, src_ty, dest, dest_ty, src_ty.boxed_ty(), dest_ty.boxed_ty());
// Primitive-representable newtype wrappers: recurse on the single field.
1532 if self.ty_to_primval_kind(src_ty).is_ok() {
1533 // TODO: We ignore the packed flag here
1534 let sty = self.get_field_ty(src_ty, 0)?.ty;
1535 let dty = self.get_field_ty(dest_ty, 0)?.ty;
1536 return self.unsize_into(src, sty, dest, dty);
1538 // unsizing of generic struct with pointer fields
1539 // Example: `Arc<T>` -> `Arc<Trait>`
1540 // here we need to increase the size of every &T thin ptr field to a fat ptr
1542 assert_eq!(def_a, def_b);
1544 let src_fields = def_a.variants[0].fields.iter();
1545 let dst_fields = def_b.variants[0].fields.iter();
1547 //let src = adt::MaybeSizedValue::sized(src);
1548 //let dst = adt::MaybeSizedValue::sized(dst);
1549 let src_ptr = match src {
1550 Value::ByRef { ptr, aligned: true } => ptr,
1551 // TODO: Is it possible for unaligned pointers to occur here?
1552 _ => bug!("expected aligned pointer, got {:?}", src),
// Destination must be memory-backed so we can write field by field.
1556 let dest = self.force_allocation(dest)?.to_ptr()?;
1557 let iter = src_fields.zip(dst_fields).enumerate();
1558 for (i, (src_f, dst_f)) in iter {
1559 let src_fty = monomorphize_field_ty(self.tcx, src_f, substs_a);
1560 let dst_fty = monomorphize_field_ty(self.tcx, dst_f, substs_b);
// ZSTs occupy no memory, nothing to copy.
1561 if self.type_size(dst_fty)? == Some(0) {
1564 let src_field_offset = self.get_field_offset(src_ty, i)?.bytes();
1565 let dst_field_offset = self.get_field_offset(dest_ty, i)?.bytes();
1566 let src_f_ptr = src_ptr.offset(src_field_offset, &self)?;
1567 let dst_f_ptr = dest.offset(dst_field_offset, &self)?;
1568 if src_fty == dst_fty {
1569 self.copy(src_f_ptr, dst_f_ptr.into(), src_fty)?;
// Field types differ: this is the field being unsized — recurse.
1571 self.unsize_into(Value::by_ref(src_f_ptr), src_fty, Lvalue::from_ptr(dst_f_ptr), dst_fty)?;
1576 _ => bug!("unsize_into: invalid conversion: {:?} -> {:?}", src_ty, dest_ty),
// Debug helper: prints a local's current state (dead / by-ref / immediate)
// and dumps any allocations it points into. Purely diagnostic; no effect on
// evaluation.
1580 pub fn dump_local(&self, lvalue: Lvalue<'tcx>) {
1582 if let Lvalue::Local { frame, local } = lvalue {
1583 let mut allocs = Vec::new();
1584 let mut msg = format!("{:?}", local);
1585 if frame != self.cur_frame() {
1586 write!(msg, " ({} frames up)", self.cur_frame() - frame).unwrap();
1588 write!(msg, ":").unwrap();
1590 match self.stack[frame].get_local(local) {
1591 Err(EvalError::DeadLocal) => {
1592 write!(msg, " is dead").unwrap();
1595 panic!("Failed to access local: {:?}", err);
1597 Ok(Value::ByRef { ptr, aligned }) => match ptr.into_inner_primval() {
1598 PrimVal::Ptr(ptr) => {
1599 write!(msg, " by {}ref:", if aligned { "" } else { "unaligned " }).unwrap();
1600 allocs.push(ptr.alloc_id);
1602 ptr => write!(msg, " integral by ref: {:?}", ptr).unwrap(),
1604 Ok(Value::ByVal(val)) => {
1605 write!(msg, " {:?}", val).unwrap();
1606 if let PrimVal::Ptr(ptr) = val { allocs.push(ptr.alloc_id); }
1608 Ok(Value::ByValPair(val1, val2)) => {
1609 write!(msg, " ({:?}, {:?})", val1, val2).unwrap();
1610 if let PrimVal::Ptr(ptr) = val1 { allocs.push(ptr.alloc_id); }
1611 if let PrimVal::Ptr(ptr) = val2 { allocs.push(ptr.alloc_id); }
1616 self.memory.dump_allocs(allocs);
1620 /// Convenience function to ensure correct usage of globals and code-sharing with locals.
// Applies `f` to the cached global's value and writes the result back,
// rejecting writes to immutable globals before `f` runs.
1621 pub fn modify_global<F>(&mut self, cid: GlobalId<'tcx>, f: F) -> EvalResult<'tcx>
1622 where F: FnOnce(&mut Self, Value) -> EvalResult<'tcx, Value>,
1624 let mut val = self.globals.get(&cid).expect("global not cached").clone();
1625 if val.mutable == Mutability::Immutable {
1626 return Err(EvalError::ModifiedConstantMemory);
1628 val.value = f(self, val.value)?;
1629 *self.globals.get_mut(&cid).expect("already checked") = val;
1633 /// Convenience function to ensure correct usage of locals and code-sharing with globals.
// Applies `f` to a frame-local's value and stores the result back; errors if
// the local is dead.
1634 pub fn modify_local<F>(
1639 ) -> EvalResult<'tcx>
1640 where F: FnOnce(&mut Self, Value) -> EvalResult<'tcx, Value>,
1642 let val = self.stack[frame].get_local(local)?;
1643 let new_val = f(self, val)?;
1644 self.stack[frame].set_local(local, new_val)?;
1645 // FIXME(solson): Run this when setting to Undef? (See previous version of this code.)
1646 // if let Value::ByRef(ptr) = self.stack[frame].get_local(local) {
1647 // self.memory.deallocate(ptr)?;
// Reports an evaluation error through rustc's diagnostics: the main error is
// spanned at the current statement/terminator, then one note per stack frame
// (innermost first) describing the call chain. Falls back to a span-less
// error when no frame exists.
1652 pub fn report(&self, e: &EvalError) {
1653 if let Some(frame) = self.stack().last() {
1654 let block = &frame.mir.basic_blocks()[frame.block];
// `stmt` past the statement list means we are at the block terminator.
1655 let span = if frame.stmt < block.statements.len() {
1656 block.statements[frame.stmt].source_info.span
1658 block.terminator().source_info.span
1660 let mut err = self.tcx.sess.struct_span_err(span, &e.to_string());
1661 for &Frame { instance, span, .. } in self.stack().iter().rev() {
// Closures have unprintable instance paths; special-case the message.
1662 if self.tcx.def_key(instance.def_id()).disambiguated_data.data == DefPathData::ClosureExpr {
1663 err.span_note(span, "inside call to closure");
1666 err.span_note(span, &format!("inside call to {}", instance));
1670 self.tcx.sess.err(&e.to_string());
// Local-slot management for a call frame. Slot 0 (the return pointer) is not
// stored, hence the ubiquitous `local.index() - 1`. `None` in a slot means the
// local is dead (StorageDead or never live).
1675 impl<'tcx> Frame<'tcx> {
1676 pub fn get_local(&self, local: mir::Local) -> EvalResult<'tcx, Value> {
1677 // Subtract 1 because we don't store a value for the ReturnPointer, the local with index 0.
1678 self.locals[local.index() - 1].ok_or(EvalError::DeadLocal)
1681 fn set_local(&mut self, local: mir::Local, value: Value) -> EvalResult<'tcx> {
1682 // Subtract 1 because we don't store a value for the ReturnPointer, the local with index 0.
1683 match self.locals[local.index() - 1] {
1684 None => Err(EvalError::DeadLocal),
1685 Some(ref mut local) => {
// Returns the old value of the local so callers can clean up its storage.
1692 pub fn storage_live(&mut self, local: mir::Local) -> EvalResult<'tcx, Option<Value>> {
1693 trace!("{:?} is now live", local);
1695 let old = self.locals[local.index() - 1];
1696 self.locals[local.index() - 1] = Some(Value::ByVal(PrimVal::Undef)); // StorageLive *always* kills the value that's currently stored
1700 /// Returns the old value of the local
1701 pub fn storage_dead(&mut self, local: mir::Local) -> EvalResult<'tcx, Option<Value>> {
1702 trace!("{:?} is now dead", local);
1704 let old = self.locals[local.index() - 1];
1705 self.locals[local.index() - 1] = None;
1710 // TODO(solson): Upstream these methods into rustc::ty::layout.
// Byte-size helper for layout integers. Note `I1` is widened to a full byte
// (bools occupy one byte in memory).
1712 pub(super) trait IntegerExt {
1713 fn size(self) -> Size;
1716 impl IntegerExt for layout::Integer {
1717 fn size(self) -> Size {
1718 use rustc::ty::layout::Integer::*;
1720 I1 | I8 => Size::from_bits(8),
1721 I16 => Size::from_bits(16),
1722 I32 => Size::from_bits(32),
1723 I64 => Size::from_bits(64),
1724 I128 => Size::from_bits(128),
// Substitutes `substs` into a field's declared type and normalizes any
// associated types, yielding a fully concrete field type.
1730 pub fn monomorphize_field_ty<'a, 'tcx:'a >(tcx: TyCtxt<'a, 'tcx, 'tcx>, f: &ty::FieldDef, substs: &'tcx Substs<'tcx>) -> Ty<'tcx> {
1731 let substituted = f.ty(tcx, substs);
1732 tcx.normalize_associated_type(&substituted)
// Whether `ty` has at least one inhabitant (i.e. is not uninhabited like `!`).
1735 pub fn is_inhabited<'a, 'tcx: 'a>(tcx: TyCtxt<'a, 'tcx, 'tcx>, ty: Ty<'tcx>) -> bool {
1736 ty.uninhabited_from(&mut HashMap::default(), tcx).is_empty()
1739 /// FIXME: expose trans::monomorphize::resolve_closure
// Resolves a closure to an instance, inserting the FnOnce adapter shim when
// a by-ref closure (`Fn`/`FnMut`) is called through the `FnOnce` trait.
1740 pub fn resolve_closure<'a, 'tcx> (
1741 tcx: TyCtxt<'a, 'tcx, 'tcx>,
1743 substs: ty::ClosureSubsts<'tcx>,
1744 requested_kind: ty::ClosureKind,
1745 ) -> ty::Instance<'tcx> {
1746 let actual_kind = tcx.closure_kind(def_id);
1747 match needs_fn_once_adapter_shim(actual_kind, requested_kind) {
1748 Ok(true) => fn_once_adapter_instance(tcx, def_id, substs),
// No shim needed (or invalid request): use the closure body directly.
1749 _ => ty::Instance::new(def_id, substs.substs)
// Builds the `ClosureOnceShim` instance that adapts a by-ref closure to the
// by-value `FnOnce::call_once` signature. The shim's substs are the closure's
// self type plus the (single, tupled) argument type.
1753 fn fn_once_adapter_instance<'a, 'tcx>(
1754 tcx: TyCtxt<'a, 'tcx, 'tcx>,
1756 substs: ty::ClosureSubsts<'tcx>,
1757 ) -> ty::Instance<'tcx> {
1758 debug!("fn_once_adapter_shim({:?}, {:?})",
// `call_once` is the only method on the `FnOnce` lang-item trait.
1761 let fn_once = tcx.lang_items.fn_once_trait().unwrap();
1762 let call_once = tcx.associated_items(fn_once)
1763 .find(|it| it.kind == ty::AssociatedKind::Method)
1765 let def = ty::InstanceDef::ClosureOnceShim { call_once };
1767 let self_ty = tcx.mk_closure_from_closure_substs(
1768 closure_did, substs);
1770 let sig = tcx.fn_sig(closure_did).subst(tcx, substs.substs);
1771 let sig = tcx.erase_late_bound_regions_and_normalize(&sig);
// Closure calling convention: exactly one tupled argument.
1772 assert_eq!(sig.inputs().len(), 1);
1773 let substs = tcx.mk_substs([
1774 Kind::from(self_ty),
1775 Kind::from(sig.inputs()[0]),
1778 debug!("fn_once_adapter_shim: self_ty={:?} sig={:?}", self_ty, sig);
1779 ty::Instance { def, substs }
// Decides whether calling a closure of `actual_closure_kind` through the
// trait `trait_closure_kind` needs the FnOnce adapter shim:
// same kind or Fn-through-FnMut need none; Fn/FnMut-through-FnOnce need one.
// (The error arm for invalid combinations is elided in this excerpt.)
1782 fn needs_fn_once_adapter_shim(actual_closure_kind: ty::ClosureKind,
1783 trait_closure_kind: ty::ClosureKind)
1786 match (actual_closure_kind, trait_closure_kind) {
1787 (ty::ClosureKind::Fn, ty::ClosureKind::Fn) |
1788 (ty::ClosureKind::FnMut, ty::ClosureKind::FnMut) |
1789 (ty::ClosureKind::FnOnce, ty::ClosureKind::FnOnce) => {
1790 // No adapter needed.
1793 (ty::ClosureKind::Fn, ty::ClosureKind::FnMut) => {
1794 // The closure fn `llfn` is a `fn(&self, ...)`. We want a
1795 // `fn(&mut self, ...)`. In fact, at trans time, these are
1796 // basically the same thing, so we can just return llfn.
1799 (ty::ClosureKind::Fn, ty::ClosureKind::FnOnce) |
1800 (ty::ClosureKind::FnMut, ty::ClosureKind::FnOnce) => {
1801 // The closure fn `llfn` is a `fn(&self, ...)` or `fn(&mut
1802 // self, ...)`. We want a `fn(self, ...)`. We can produce
1803 // this by doing something like:
1805 // fn call_once(self, ...) { call_mut(&self, ...) }
1806 // fn call_once(mut self, ...) { call_mut(&mut self, ...) }
1808 // These are both the same at trans time.
1815 /// The point where linking happens. Resolve a (def_id, substs)
1816 /// pair to an instance.
// Trait items are dispatched to their concrete impl via trait selection;
// free items are classified as intrinsics, drop glue (trivial or not), or
// plain items.
1817 pub fn resolve<'a, 'tcx>(
1818 tcx: TyCtxt<'a, 'tcx, 'tcx>,
1820 substs: &'tcx Substs<'tcx>
1821 ) -> ty::Instance<'tcx> {
1822 debug!("resolve(def_id={:?}, substs={:?})",
1824 let result = if let Some(trait_def_id) = tcx.trait_of_item(def_id) {
1825 debug!(" => associated item, attempting to find impl");
1826 let item = tcx.associated_item(def_id);
1827 resolve_associated_item(tcx, &item, trait_def_id, substs)
1829 let item_type = def_ty(tcx, def_id, substs);
1830 let def = match item_type.sty {
1831 ty::TyFnDef(..) if {
1832 let f = item_type.fn_sig(tcx);
1833 f.abi() == Abi::RustIntrinsic ||
1834 f.abi() == Abi::PlatformIntrinsic
1837 debug!(" => intrinsic");
1838 ty::InstanceDef::Intrinsic(def_id)
// `drop_in_place` gets special drop-glue instances, keyed by whether the
// monomorphized type actually needs dropping.
1841 if Some(def_id) == tcx.lang_items.drop_in_place_fn() {
1842 let ty = substs.type_at(0);
1843 if needs_drop_glue(tcx, ty) {
1844 debug!(" => nontrivial drop glue");
1845 ty::InstanceDef::DropGlue(def_id, Some(ty))
1847 debug!(" => trivial drop glue");
1848 ty::InstanceDef::DropGlue(def_id, None)
1851 debug!(" => free item");
1852 ty::InstanceDef::Item(def_id)
1856 ty::Instance { def, substs }
1858 debug!("resolve(def_id={:?}, substs={:?}) = {}",
1859 def_id, substs, result);
// Whether dropping a value of type `t` requires running any code.
// Special case: a `Box` of a ZST does not allocate, so the deallocation call
// can be skipped for it.
1863 pub fn needs_drop_glue<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, t: Ty<'tcx>) -> bool {
1864 assert!(t.is_normalized_for_trans());
1866 let t = tcx.erase_regions(&t);
1868 // FIXME (#22815): note that type_needs_drop conservatively
1869 // approximates in some cases and may say a type expression
1870 // requires drop glue when it actually does not.
1872 // (In this case it is not clear whether any harm is done, i.e.
1873 // erroneously returning `true` in some cases where we could have
1874 // returned `false` does not appear unsound. The impact on
1875 // code quality is unknown at this time.)
1877 let env = ty::ParamEnv::empty(Reveal::All);
1878 if !t.needs_drop(tcx, env) {
1882 ty::TyAdt(def, _) if def.is_box() => {
1883 let typ = t.boxed_ty();
1884 if !typ.needs_drop(tcx, env) && type_is_sized(tcx, typ) {
1885 let layout = t.layout(tcx, ty::ParamEnv::empty(Reveal::All)).unwrap();
1886 // `Box<ZeroSizeType>` does not allocate.
1887 layout.size(&tcx.data_layout).bytes() != 0
// Resolves a trait item to the concrete instance that implements it, by
// running trait selection and dispatching on the resulting vtable kind
// (impl, closure, fn-pointer shim, or virtual call).
1896 fn resolve_associated_item<'a, 'tcx>(
1897 tcx: TyCtxt<'a, 'tcx, 'tcx>,
1898 trait_item: &ty::AssociatedItem,
1900 rcvr_substs: &'tcx Substs<'tcx>
1901 ) -> ty::Instance<'tcx> {
1902 let def_id = trait_item.def_id;
1903 debug!("resolve_associated_item(trait_item={:?}, \
1906 def_id, trait_id, rcvr_substs);
1908 let trait_ref = ty::TraitRef::from_method(tcx, trait_id, rcvr_substs);
1909 let vtbl = fulfill_obligation(tcx, DUMMY_SP, ty::Binder(trait_ref));
1911 // Now that we know which impl is being used, we can dispatch to
1912 // the actual function:
1914 ::rustc::traits::VtableImpl(impl_data) => {
1915 let (def_id, substs) = ::rustc::traits::find_associated_item(
1916 tcx, trait_item, rcvr_substs, &impl_data);
1917 let substs = tcx.erase_regions(&substs);
1918 ty::Instance::new(def_id, substs)
1920 ::rustc::traits::VtableClosure(closure_data) => {
1921 let trait_closure_kind = tcx.lang_items.fn_trait_kind(trait_id).unwrap();
1922 resolve_closure(tcx, closure_data.closure_def_id, closure_data.substs,
1925 ::rustc::traits::VtableFnPointer(ref data) => {
// Fn-pointer impls of Fn* traits go through a generated shim.
1927 def: ty::InstanceDef::FnPtrShim(trait_item.def_id, data.fn_ty),
1931 ::rustc::traits::VtableObject(ref data) => {
// Trait-object dispatch: record the method's slot in the vtable.
1932 let index = tcx.get_vtable_index_of_object_method(data, def_id);
1934 def: ty::InstanceDef::Virtual(def_id, index),
1939 bug!("static call to invalid vtable: {:?}", vtbl)
// Returns the type of a definition with `substs` applied and associated
// types normalized.
1944 pub fn def_ty<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
1946 substs: &'tcx Substs<'tcx>)
1949 let ty = tcx.type_of(def_id);
1950 apply_param_substs(tcx, substs, &ty)
1953 /// Monomorphizes a type from the AST by first applying the in-scope
1954 /// substitutions and then normalizing any associated types.
// Also erases regions, since evaluation does not care about lifetimes.
1955 pub fn apply_param_substs<'a, 'tcx, T>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
1956 param_substs: &Substs<'tcx>,
1959 where T: ::rustc::infer::TransNormalize<'tcx>
1961 debug!("apply_param_substs(param_substs={:?}, value={:?})", param_substs, value);
1962 let substituted = value.subst(tcx, param_substs);
1963 let substituted = tcx.erase_regions(&substituted);
1964 AssociatedTypeNormalizer{ tcx }.fold(&substituted)
// Type folder that normalizes away projection types (`<T as Trait>::Assoc`);
// `fold` short-circuits when there is nothing to normalize.
1968 struct AssociatedTypeNormalizer<'a, 'tcx: 'a> {
1969 tcx: TyCtxt<'a, 'tcx, 'tcx>,
1972 impl<'a, 'tcx> AssociatedTypeNormalizer<'a, 'tcx> {
1973 fn fold<T: TypeFoldable<'tcx>>(&mut self, value: &T) -> T {
1974 if !value.has_projection_types() {
1977 value.fold_with(self)
// TypeFolder impl: per-type normalization, again skipping types without
// projections to avoid needless work.
1982 impl<'a, 'tcx> ::rustc::ty::fold::TypeFolder<'tcx, 'tcx> for AssociatedTypeNormalizer<'a, 'tcx> {
1983 fn tcx<'c>(&'c self) -> TyCtxt<'c, 'tcx, 'tcx> {
1987 fn fold_ty(&mut self, ty: Ty<'tcx>) -> Ty<'tcx> {
1988 if !ty.has_projection_types() {
1991 self.tcx.normalize_associated_type(&ty)
// Whether `ty` is `Sized` in the empty (fully monomorphic) environment.
1996 fn type_is_sized<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, ty: Ty<'tcx>) -> bool {
1997 // generics are weird, don't run this function on a generic
1998 assert!(!ty.needs_subst());
1999 ty.is_sized(tcx, ty::ParamEnv::empty(Reveal::All), DUMMY_SP)
2002 /// Attempts to resolve an obligation. The result is a shallow vtable resolution -- meaning that we
2003 /// do not (necessarily) resolve all nested obligations on the impl. Note that type check should
2004 /// guarantee to us that all nested obligations *could be* resolved if we wanted to.
// Runs trait selection for `trait_ref` inside a fresh inference context.
// Ambiguity is treated as monomorphization-induced overflow (fatal); other
// selection errors are compiler bugs at this stage, since typeck already
// proved the obligation holdable.
2005 fn fulfill_obligation<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
2007 trait_ref: ty::PolyTraitRef<'tcx>)
2008 -> traits::Vtable<'tcx, ()>
2010 // Remove any references to regions; this helps improve caching.
2011 let trait_ref = tcx.erase_regions(&trait_ref);
2013 debug!("trans::fulfill_obligation(trait_ref={:?}, def_id={:?})",
2014 trait_ref, trait_ref.def_id());
2016 // Do the initial selection for the obligation. This yields the
2017 // shallow result we are looking for -- that is, what specific impl.
2018 tcx.infer_ctxt().enter(|infcx| {
2019 let mut selcx = traits::SelectionContext::new(&infcx);
2021 let obligation_cause = traits::ObligationCause::misc(span,
2022 ast::DUMMY_NODE_ID);
2023 let obligation = traits::Obligation::new(obligation_cause,
2024 ty::ParamEnv::empty(Reveal::All),
2025 trait_ref.to_poly_trait_predicate());
2027 let selection = match selcx.select(&obligation) {
2028 Ok(Some(selection)) => selection,
2030 // Ambiguity can happen when monomorphizing during trans
2031 // expands to some humongo type that never occurred
2032 // statically -- this humongo type can then overflow,
2033 // leading to an ambiguous result. So report this as an
2034 // overflow bug, since I believe this is the only case
2035 // where ambiguity can result.
2036 debug!("Encountered ambiguity selecting `{:?}` during trans, \
2037 presuming due to overflow",
2039 tcx.sess.span_fatal(span,
2040 "reached the recursion limit during monomorphization \
2041 (selection ambiguity)");
2044 span_bug!(span, "Encountered error `{:?}` selecting `{:?}` during trans",
2049 debug!("fulfill_obligation: selection={:?}", selection);
2051 // Currently, we use a fulfillment context to completely resolve
2052 // all nested obligations. This is because they can inform the
2053 // inference of the impl's type parameters.
2054 let mut fulfill_cx = traits::FulfillmentContext::new();
2055 let vtable = selection.map(|predicate| {
2056 debug!("fulfill_obligation: register_predicate_obligation {:?}", predicate);
2057 fulfill_cx.register_predicate_obligation(&infcx, predicate);
// Resolve remaining inference variables in the vtable, or ICE if impossible.
2059 let vtable = infcx.drain_fulfillment_cx_or_panic(span, &mut fulfill_cx, &vtable);
2061 debug!("Cache miss: {:?} => {:?}", trait_ref, vtable);
2066 pub fn resolve_drop_in_place<'a, 'tcx>(
2067 tcx: TyCtxt<'a, 'tcx, 'tcx>,
2069 ) -> ty::Instance<'tcx>
2071 let def_id = tcx.require_lang_item(::rustc::middle::lang_items::DropInPlaceFnLangItem);
2072 let substs = tcx.intern_substs(&[Kind::from(ty)]);
2073 resolve(tcx, def_id, substs)