use std::fmt::Write;
use std::hash::{Hash, Hasher};
use std::mem;

use rustc::hir::def_id::DefId;
use rustc::hir::def::Def;
use rustc::hir::map::definitions::DefPathData;
use rustc::mir;
use rustc::ty::layout::{self, Size, Align, HasDataLayout, IntegerExt, LayoutOf, TyLayout, Primitive};
use rustc::ty::subst::{Subst, Substs};
use rustc::ty::{self, Ty, TyCtxt, TypeAndMut};
use rustc::ty::query::TyCtxtAt;
use rustc_data_structures::fx::{FxHashSet, FxHasher};
use rustc_data_structures::indexed_vec::{IndexVec, Idx};
use rustc::mir::interpret::{
    GlobalId, Value, Scalar, FrameInfo, AllocType,
    EvalResult, EvalErrorKind, Pointer, ConstValue,
};
use syntax::codemap::{self, Span};
use syntax::ast::Mutability;

use super::{Place, PlaceExtra, Memory,
            HasMemory, MemoryKind,
            Machine};
macro_rules! validation_failure {
    ($what:expr, $where:expr, $details:expr) => {{
        let where_ = if $where.is_empty() {
            String::new()
        } else {
            format!(" at {}", $where)
        };
        err!(ValidationFailure(format!(
            "encountered {}{}, but expected {}",
            $what, where_, $details,
        )))
    }};
    ($what:expr, $where:expr) => {{
        let where_ = if $where.is_empty() {
            String::new()
        } else {
            format!(" at {}", $where)
        };
        err!(ValidationFailure(format!(
            "encountered {}{}",
            $what, where_,
        )))
    }};
}
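// Example (hypothetical values): validating the bit pattern `3` for a `bool`
// at path `.field` would hit the three-argument arm above and produce
// "encountered 3 at .field, but expected something in the range 0..=1".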
pub struct EvalContext<'a, 'mir, 'tcx: 'a + 'mir, M: Machine<'mir, 'tcx>> {
    /// Stores the `Machine` instance.
    pub machine: M,

    /// The results of the type checker, from rustc.
    pub tcx: TyCtxtAt<'a, 'tcx, 'tcx>,

    /// Bounds in scope for polymorphic evaluations.
    pub param_env: ty::ParamEnv<'tcx>,

    /// The virtual memory system.
    pub memory: Memory<'a, 'mir, 'tcx, M>,

    /// The virtual call stack.
    pub(crate) stack: Vec<Frame<'mir, 'tcx>>,

    /// The maximum number of stack frames allowed.
    pub(crate) stack_limit: usize,

    /// When this value is negative, it indicates the number of interpreter
    /// steps *until* the loop detector is enabled. When it is positive, it is
    /// the number of steps after the detector has been enabled, modulo the
    /// loop detector period.
    pub(crate) steps_since_detector_enabled: isize,

    pub(crate) loop_detector: InfiniteLoopDetector<'a, 'mir, 'tcx, M>,
}
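// Sketch of the counter's assumed lifecycle: it starts at
// -STEPS_UNTIL_DETECTOR_ENABLED, is incremented on every interpreter step,
// and once it reaches zero the loop detector starts observing machine
// snapshots (see `InfiniteLoopDetector` below).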
#[derive(Clone)]
pub struct Frame<'mir, 'tcx: 'mir> {
    ////////////////////////////////////////////////////////////////////////////////
    // Function and callsite information
    ////////////////////////////////////////////////////////////////////////////////
    /// The MIR for the function called on this frame.
    pub mir: &'mir mir::Mir<'tcx>,

    /// The def_id and substs of the current function.
    pub instance: ty::Instance<'tcx>,

    /// The span of the call site.
    pub span: codemap::Span,
    ////////////////////////////////////////////////////////////////////////////////
    // Return place and locals
    ////////////////////////////////////////////////////////////////////////////////
    /// The block to return to when returning from the current stack frame.
    pub return_to_block: StackPopCleanup,

    /// The location where the result of the current stack frame should be written to.
    pub return_place: Place,

    /// The list of locals for this stack frame, stored in order as
    /// `[return_ptr, arguments..., variables..., temporaries...]`. The locals are stored as `Option<Value>`s.
    /// `None` represents a local that is currently dead, while a live local
    /// can either directly contain a `Scalar` or refer to some part of an `Allocation`.
    ///
    /// Before being initialized, arguments are `Value::Scalar(Scalar::undef())` and other locals are `None`.
    pub locals: IndexVec<mir::Local, Option<Value>>,

    ////////////////////////////////////////////////////////////////////////////////
    // Current position within the function
    ////////////////////////////////////////////////////////////////////////////////
    /// The block that is currently executed (or will be executed after the above call stacks
    /// return).
    pub block: mir::BasicBlock,

    /// The index of the currently evaluated statement.
    pub stmt: usize,
}

impl<'mir, 'tcx: 'mir> Eq for Frame<'mir, 'tcx> {}
impl<'mir, 'tcx: 'mir> PartialEq for Frame<'mir, 'tcx> {
    fn eq(&self, other: &Self) -> bool {
        let Frame {
            mir: _,
            instance,
            span: _,
            return_to_block,
            return_place,
            locals,
            block,
            stmt,
        } = self;

        // Some of these are constant during evaluation, but are included
        // anyway for correctness.
        *instance == other.instance
            && *return_to_block == other.return_to_block
            && *return_place == other.return_place
            && *locals == other.locals
            && *block == other.block
            && *stmt == other.stmt
    }
}
impl<'mir, 'tcx: 'mir> Hash for Frame<'mir, 'tcx> {
    fn hash<H: Hasher>(&self, state: &mut H) {
        let Frame {
            mir: _,
            instance,
            span: _,
            return_to_block,
            return_place,
            locals,
            block,
            stmt,
        } = self;

        instance.hash(state);
        return_to_block.hash(state);
        return_place.hash(state);
        locals.hash(state);
        block.hash(state);
        stmt.hash(state);
    }
}
/// The virtual machine state during const-evaluation at a given point in time.
type EvalSnapshot<'a, 'mir, 'tcx, M>
    = (M, Vec<Frame<'mir, 'tcx>>, Memory<'a, 'mir, 'tcx, M>);

pub(crate) struct InfiniteLoopDetector<'a, 'mir, 'tcx: 'a + 'mir, M: Machine<'mir, 'tcx>> {
    /// The set of all `EvalSnapshot` *hashes* observed by this detector.
    ///
    /// When a collision occurs in this table, we store the full snapshot in
    /// `snapshots`.
    hashes: FxHashSet<u64>,

    /// The set of all `EvalSnapshot`s observed by this detector.
    ///
    /// An `EvalSnapshot` will only be fully cloned once it has caused a
    /// collision in `hashes`. As a result, the detector must observe at least
    /// *two* full cycles of an infinite loop before it triggers.
    snapshots: FxHashSet<EvalSnapshot<'a, 'mir, 'tcx, M>>,
}
impl<'a, 'mir, 'tcx, M> Default for InfiniteLoopDetector<'a, 'mir, 'tcx, M>
    where M: Machine<'mir, 'tcx>,
          'tcx: 'a + 'mir,
{
    fn default() -> Self {
        InfiniteLoopDetector {
            hashes: FxHashSet::default(),
            snapshots: FxHashSet::default(),
        }
    }
}
impl<'a, 'mir, 'tcx, M> InfiniteLoopDetector<'a, 'mir, 'tcx, M>
    where M: Machine<'mir, 'tcx>,
          'tcx: 'a + 'mir,
{
    /// Returns `true` if the loop detector has not yet observed a snapshot.
    pub fn is_empty(&self) -> bool {
        self.hashes.is_empty()
    }

    pub fn observe_and_analyze(
        &mut self,
        machine: &M,
        stack: &Vec<Frame<'mir, 'tcx>>,
        memory: &Memory<'a, 'mir, 'tcx, M>,
    ) -> EvalResult<'tcx, ()> {
        let snapshot = (machine, stack, memory);

        let mut fx = FxHasher::default();
        snapshot.hash(&mut fx);
        let hash = fx.finish();

        if self.hashes.insert(hash) {
            // No collision
            return Ok(());
        }

        if self.snapshots.insert((machine.clone(), stack.clone(), memory.clone())) {
            // Spurious collision or first cycle
            return Ok(());
        }

        // Second cycle
        Err(EvalErrorKind::InfiniteLoop.into())
    }
}
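// A minimal sketch of how a step loop is assumed to drive the detector
// (names hypothetical):
//
//     if ecx.steps_since_detector_enabled >= 0 {
//         ecx.loop_detector.observe_and_analyze(&ecx.machine, &ecx.stack, &ecx.memory)?;
//     }
//
// A repeated machine state first collides in `hashes`, is then cloned into
// `snapshots`, and only its *second* exact recurrence returns `InfiniteLoop`.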
#[derive(Clone, Debug, Eq, PartialEq, Hash)]
pub enum StackPopCleanup {
    /// The stackframe existed to compute the initial value of a static/constant; make sure it
    /// isn't modifiable afterwards in case of constants.
    /// In case of `static mut`, mark the memory to ensure it's never marked as immutable through
    /// references or deallocated.
    MarkStatic(Mutability),
    /// A regular stackframe added due to a function call will need to get forwarded to the next
    /// block.
    Goto(mir::BasicBlock),
    /// The main function and diverging functions have nowhere to return to.
    None,
}
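// In other words: constant/static initializer frames pop with `MarkStatic`,
// ordinary call frames pop with `Goto(return_block)`, and `main` or diverging
// functions pop with `None`.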
#[derive(Copy, Clone, Debug)]
pub struct TyAndPacked<'tcx> {
    pub ty: Ty<'tcx>,
    pub packed: bool,
}

#[derive(Copy, Clone, Debug)]
pub struct ValTy<'tcx> {
    pub value: Value,
    pub ty: Ty<'tcx>,
}

impl<'tcx> ::std::ops::Deref for ValTy<'tcx> {
    type Target = Value;
    fn deref(&self) -> &Value {
        &self.value
    }
}
impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> HasDataLayout for &'a EvalContext<'a, 'mir, 'tcx, M> {
    #[inline]
    fn data_layout(&self) -> &layout::TargetDataLayout {
        &self.tcx.data_layout
    }
}

impl<'c, 'b, 'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> HasDataLayout
    for &'c &'b mut EvalContext<'a, 'mir, 'tcx, M> {
    #[inline]
    fn data_layout(&self) -> &layout::TargetDataLayout {
        &self.tcx.data_layout
    }
}

impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> layout::HasTyCtxt<'tcx> for &'a EvalContext<'a, 'mir, 'tcx, M> {
    #[inline]
    fn tcx<'b>(&'b self) -> TyCtxt<'b, 'tcx, 'tcx> {
        *self.tcx
    }
}

impl<'c, 'b, 'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> layout::HasTyCtxt<'tcx>
    for &'c &'b mut EvalContext<'a, 'mir, 'tcx, M> {
    #[inline]
    fn tcx<'d>(&'d self) -> TyCtxt<'d, 'tcx, 'tcx> {
        *self.tcx
    }
}

impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> LayoutOf for &'a EvalContext<'a, 'mir, 'tcx, M> {
    type Ty = Ty<'tcx>;
    type TyLayout = EvalResult<'tcx, TyLayout<'tcx>>;

    fn layout_of(self, ty: Ty<'tcx>) -> Self::TyLayout {
        self.tcx.layout_of(self.param_env.and(ty))
            .map_err(|layout| EvalErrorKind::Layout(layout).into())
    }
}

impl<'c, 'b, 'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> LayoutOf
    for &'c &'b mut EvalContext<'a, 'mir, 'tcx, M> {
    type Ty = Ty<'tcx>;
    type TyLayout = EvalResult<'tcx, TyLayout<'tcx>>;

    #[inline]
    fn layout_of(self, ty: Ty<'tcx>) -> Self::TyLayout {
        (&**self).layout_of(ty)
    }
}

const STEPS_UNTIL_DETECTOR_ENABLED: isize = 1_000_000;
impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> {
    pub fn new(
        tcx: TyCtxtAt<'a, 'tcx, 'tcx>,
        param_env: ty::ParamEnv<'tcx>,
        machine: M,
        memory_data: M::MemoryData,
    ) -> Self {
        EvalContext {
            machine,
            tcx,
            param_env,
            memory: Memory::new(tcx, memory_data),
            stack: Vec::new(),
            stack_limit: tcx.sess.const_eval_stack_frame_limit,
            loop_detector: Default::default(),
            steps_since_detector_enabled: -STEPS_UNTIL_DETECTOR_ENABLED,
        }
    }

    pub(crate) fn with_fresh_body<F: FnOnce(&mut Self) -> R, R>(&mut self, f: F) -> R {
        let stack = mem::replace(&mut self.stack, Vec::new());
        let steps = mem::replace(&mut self.steps_since_detector_enabled, -STEPS_UNTIL_DETECTOR_ENABLED);
        let r = f(self);
        self.stack = stack;
        self.steps_since_detector_enabled = steps;
        r
    }
    pub fn alloc_ptr(&mut self, layout: TyLayout<'tcx>) -> EvalResult<'tcx, Pointer> {
        assert!(!layout.is_unsized(), "cannot alloc memory for unsized type");

        self.memory.allocate(layout.size, layout.align, MemoryKind::Stack)
    }

    pub fn memory(&self) -> &Memory<'a, 'mir, 'tcx, M> {
        &self.memory
    }

    pub fn memory_mut(&mut self) -> &mut Memory<'a, 'mir, 'tcx, M> {
        &mut self.memory
    }

    pub fn stack(&self) -> &[Frame<'mir, 'tcx>] {
        &self.stack
    }

    #[inline]
    pub fn cur_frame(&self) -> usize {
        assert!(self.stack.len() > 0);
        self.stack.len() - 1
    }

    pub fn str_to_value(&mut self, s: &str) -> EvalResult<'tcx, Value> {
        let ptr = self.memory.allocate_bytes(s.as_bytes());
        Ok(Scalar::Ptr(ptr).to_value_with_len(s.len() as u64, self.tcx.tcx))
    }
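    // For illustration (value shape assumed from `to_value_with_len`):
    // `str_to_value("hi")` allocates the bytes `[0x68, 0x69]` and yields a
    // fat-pointer value carrying both the data pointer and the length 2.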
    pub fn const_to_value(
        &mut self,
        val: ConstValue<'tcx>,
    ) -> EvalResult<'tcx, Value> {
        match val {
            ConstValue::Unevaluated(def_id, substs) => {
                let instance = self.resolve(def_id, substs)?;
                self.read_global_as_value(GlobalId {
                    instance,
                    promoted: None,
                })
            }
            ConstValue::ByRef(alloc, offset) => {
                // FIXME: Allocate new AllocId for all constants inside
                let id = self.memory.allocate_value(alloc.clone(), MemoryKind::Stack)?;
                Ok(Value::ByRef(Pointer::new(id, offset).into(), alloc.align))
            },
            ConstValue::ScalarPair(a, b) => Ok(Value::ScalarPair(a, b)),
            ConstValue::Scalar(val) => Ok(Value::Scalar(val)),
        }
    }
    pub(super) fn resolve(&self, def_id: DefId, substs: &'tcx Substs<'tcx>) -> EvalResult<'tcx, ty::Instance<'tcx>> {
        trace!("resolve: {:?}, {:#?}", def_id, substs);
        trace!("substs: {:#?}", self.substs());
        trace!("param_env: {:#?}", self.param_env);
        let substs = self.tcx.subst_and_normalize_erasing_regions(
            self.substs(),
            self.param_env,
            &substs,
        );
        ty::Instance::resolve(
            *self.tcx,
            self.param_env,
            def_id,
            substs,
        ).ok_or_else(|| EvalErrorKind::TooGeneric.into())
    }

    pub(super) fn type_is_sized(&self, ty: Ty<'tcx>) -> bool {
        ty.is_sized(self.tcx, self.param_env)
    }
    pub fn load_mir(
        &self,
        instance: ty::InstanceDef<'tcx>,
    ) -> EvalResult<'tcx, &'tcx mir::Mir<'tcx>> {
        // do not continue if typeck errors occurred (can only occur in local crate)
        let did = instance.def_id();
        if did.is_local() && self.tcx.has_typeck_tables(did) && self.tcx.typeck_tables_of(did).tainted_by_errors {
            return err!(TypeckError);
        }
        trace!("load mir {:?}", instance);
        match instance {
            ty::InstanceDef::Item(def_id) => {
                self.tcx.maybe_optimized_mir(def_id).ok_or_else(||
                    EvalErrorKind::NoMirFor(self.tcx.item_path_str(def_id)).into()
                )
            }
            _ => Ok(self.tcx.instance_mir(instance)),
        }
    }
    pub fn monomorphize(&self, ty: Ty<'tcx>, substs: &'tcx Substs<'tcx>) -> Ty<'tcx> {
        // miri doesn't care about lifetimes, and will choke on some crazy ones;
        // let's simply get rid of them
        let substituted = ty.subst(*self.tcx, substs);
        self.tcx.normalize_erasing_regions(ty::ParamEnv::reveal_all(), substituted)
    }
    /// Return the size and alignment of the value at the given type.
    /// Note that the value does not matter if the type is sized. For unsized types,
    /// the value has to be a fat pointer, and we only care about the "extra" data in it.
    pub fn size_and_align_of_dst(
        &mut self,
        ty: Ty<'tcx>,
        value: Value,
    ) -> EvalResult<'tcx, (Size, Align)> {
        let layout = self.layout_of(ty)?;
        if !layout.is_unsized() {
            Ok(layout.size_and_align())
        } else {
            match ty.sty {
                ty::TyAdt(..) | ty::TyTuple(..) => {
                    // First get the size of all statically known fields.
                    // Don't use type_of::sizing_type_of because that expects t to be sized,
                    // and it also rounds up to alignment, which we want to avoid,
                    // as the unsized field's alignment could be smaller.
                    assert!(!ty.is_simd());
                    debug!("DST {} layout: {:?}", ty, layout);

                    let sized_size = layout.fields.offset(layout.fields.count() - 1);
                    let sized_align = layout.align;
                    trace!(
                        "DST {} statically sized prefix size: {:?} align: {:?}",
                        ty, sized_size, sized_align
                    );

                    // Recurse to get the size of the dynamically sized field (must be
                    // the last field).
                    let field_ty = layout.field(&self, layout.fields.count() - 1)?.ty;
                    let (unsized_size, unsized_align) =
                        self.size_and_align_of_dst(field_ty, value)?;

                    // FIXME (#26403, #27023): We should be adding padding
                    // to `sized_size` (to accommodate the `unsized_align`
                    // required of the unsized field that follows) before
                    // summing it with `sized_size`. (Note that since #26403
                    // is unfixed, we do not yet add the necessary padding
                    // here. But this is where the add would go.)

                    // Return the sum of sizes and max of aligns.
                    let size = sized_size + unsized_size;

                    // Choose max of two known alignments (combined value must
                    // be aligned according to more restrictive of the two).
                    let align = sized_align.max(unsized_align);

                    // Issue #27023: must add any necessary padding to `size`
                    // (to make it a multiple of `align`) before returning it.
                    //
                    // Namely, the returned size should be, in C notation:
                    //
                    //   `size + ((size & (align-1)) ? align : 0)`
                    //
                    // emulated via the semi-standard fast bit trick:
                    //
                    //   `(size + (align-1)) & -align`
                    Ok((size.abi_align(align), align))
                }
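                // Worked example of the rounding above: size = 5 and
                // align = 4 gives (5 + 3) & -4 = 8, i.e. the size is
                // padded up to the next multiple of the alignment.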
                ty::TyDynamic(..) => {
                    let (_, vtable) = self.into_ptr_vtable_pair(value)?;
                    // the second entry in the vtable is the dynamic size of the object.
                    self.read_size_and_align_from_vtable(vtable)
                }

                ty::TySlice(_) | ty::TyStr => {
                    let (elem_size, align) = layout.field(&self, 0)?.size_and_align();
                    let (_, len) = self.into_slice(value)?;
                    Ok((elem_size * len, align))
                }

                _ => bug!("size_of_val::<{:?}>", ty),
            }
        }
    }
    pub fn push_stack_frame(
        &mut self,
        instance: ty::Instance<'tcx>,
        span: codemap::Span,
        mir: &'mir mir::Mir<'tcx>,
        return_place: Place,
        return_to_block: StackPopCleanup,
    ) -> EvalResult<'tcx> {
        ::log_settings::settings().indentation += 1;

        let locals = if mir.local_decls.len() > 1 {
            let mut locals = IndexVec::from_elem(Some(Value::Scalar(Scalar::undef())), &mir.local_decls);
            match self.tcx.describe_def(instance.def_id()) {
                // statics and constants don't have `Storage*` statements, no need to look for them
                Some(Def::Static(..)) | Some(Def::Const(..)) | Some(Def::AssociatedConst(..)) => {},
                _ => {
                    trace!("push_stack_frame: {:?}: num_bbs: {}", span, mir.basic_blocks().len());
                    for block in mir.basic_blocks() {
                        for stmt in block.statements.iter() {
                            use rustc::mir::StatementKind::{StorageDead, StorageLive};
                            match stmt.kind {
                                StorageLive(local) |
                                StorageDead(local) => locals[local] = None,
                                _ => {}
                            }
                        }
                    }
                },
            }
            locals
        } else {
            // don't allocate at all for trivial constants
            IndexVec::new()
        };

        self.stack.push(Frame {
            mir,
            block: mir::START_BLOCK,
            return_to_block,
            return_place,
            locals,
            span,
            instance,
            stmt: 0,
        });

        self.memory.cur_frame = self.cur_frame();

        if self.stack.len() > self.stack_limit {
            err!(StackFrameLimitReached)
        } else {
            Ok(())
        }
    }
    pub(super) fn pop_stack_frame(&mut self) -> EvalResult<'tcx> {
        ::log_settings::settings().indentation -= 1;
        M::end_region(self, None)?;
        let frame = self.stack.pop().expect(
            "tried to pop a stack frame, but there were none",
        );
        if !self.stack.is_empty() {
            // TODO: Is this the correct time to start considering these accesses as originating from the returned-to stack frame?
            self.memory.cur_frame = self.cur_frame();
        }
        match frame.return_to_block {
            StackPopCleanup::MarkStatic(mutable) => {
                if let Place::Ptr { ptr, .. } = frame.return_place {
                    // FIXME: to_ptr()? might be too extreme here, static zsts might reach this under certain conditions
                    self.memory.mark_static_initialized(
                        ptr.to_ptr()?.alloc_id,
                        mutable,
                    )?
                } else {
                    bug!("StackPopCleanup::MarkStatic on: {:?}", frame.return_place);
                }
            }
            StackPopCleanup::Goto(target) => self.goto_block(target),
            StackPopCleanup::None => {}
        }
        // deallocate all locals that are backed by an allocation
        for local in frame.locals {
            self.deallocate_local(local)?;
        }

        Ok(())
    }
    pub fn deallocate_local(&mut self, local: Option<Value>) -> EvalResult<'tcx> {
        if let Some(Value::ByRef(ptr, _align)) = local {
            trace!("deallocating local");
            let ptr = ptr.to_ptr()?;
            self.memory.dump_alloc(ptr.alloc_id);
            self.memory.deallocate_local(ptr)?;
        }
        Ok(())
    }
    /// Evaluate an assignment statement.
    ///
    /// There is no separate `eval_rvalue` function. Instead, the code for handling each rvalue
    /// type writes its results directly into the memory specified by the place.
    pub(super) fn eval_rvalue_into_place(
        &mut self,
        rvalue: &mir::Rvalue<'tcx>,
        place: &mir::Place<'tcx>,
    ) -> EvalResult<'tcx> {
        let dest = self.eval_place(place)?;
        let dest_ty = self.place_ty(place);

        use rustc::mir::Rvalue::*;
        match *rvalue {
            Use(ref operand) => {
                let value = self.eval_operand(operand)?.value;
                let valty = ValTy {
                    value,
                    ty: dest_ty,
                };
                self.write_value(valty, dest)?;
            }

            BinaryOp(bin_op, ref left, ref right) => {
                let left = self.eval_operand(left)?;
                let right = self.eval_operand(right)?;
                self.intrinsic_overflowing(
                    bin_op,
                    left,
                    right,
                    dest,
                    dest_ty,
                )?;
            }

            CheckedBinaryOp(bin_op, ref left, ref right) => {
                let left = self.eval_operand(left)?;
                let right = self.eval_operand(right)?;
                self.intrinsic_with_overflow(
                    bin_op,
                    left,
                    right,
                    dest,
                    dest_ty,
                )?;
            }

            UnaryOp(un_op, ref operand) => {
                let val = self.eval_operand_to_scalar(operand)?;
                let val = self.unary_op(un_op, val, dest_ty)?;
                self.write_scalar(
                    dest,
                    val,
                    dest_ty,
                )?;
            }

            Aggregate(ref kind, ref operands) => {
                let (dest, active_field_index) = match **kind {
                    mir::AggregateKind::Adt(adt_def, variant_index, _, active_field_index) => {
                        self.write_discriminant_value(dest_ty, dest, variant_index)?;
                        if adt_def.is_enum() {
                            (self.place_downcast(dest, variant_index)?, active_field_index)
                        } else {
                            (dest, active_field_index)
                        }
                    }
                    _ => (dest, None)
                };

                let layout = self.layout_of(dest_ty)?;
                for (i, operand) in operands.iter().enumerate() {
                    let value = self.eval_operand(operand)?;
                    // Ignore zero-sized fields.
                    if !self.layout_of(value.ty)?.is_zst() {
                        let field_index = active_field_index.unwrap_or(i);
                        let (field_dest, _) = self.place_field(dest, mir::Field::new(field_index), layout)?;
                        self.write_value(value, field_dest)?;
                    }
                }
            }

            Repeat(ref operand, _) => {
                let (elem_ty, length) = match dest_ty.sty {
                    ty::TyArray(elem_ty, n) => (elem_ty, n.unwrap_usize(self.tcx.tcx)),
                    _ => {
                        bug!(
                            "tried to assign array-repeat to non-array type {:?}",
                            dest_ty
                        )
                    }
                };
                let elem_size = self.layout_of(elem_ty)?.size;
                let value = self.eval_operand(operand)?.value;

                let (dest, dest_align) = self.force_allocation(dest)?.to_ptr_align();

                if length > 0 {
                    // write the first value
                    self.write_value_to_ptr(value, dest, dest_align, elem_ty)?;

                    if length > 1 {
                        let rest = dest.ptr_offset(elem_size * 1 as u64, &self)?;
                        self.memory.copy_repeatedly(dest, dest_align, rest, dest_align, elem_size, length - 1, false)?;
                    }
                }
            }

            Len(ref place) => {
                // FIXME(CTFE): don't allow computing the length of arrays in const eval
                let src = self.eval_place(place)?;
                let ty = self.place_ty(place);
                let (_, len) = src.elem_ty_and_len(ty, self.tcx.tcx);
                let defined = self.memory.pointer_size().bits() as u8;
                self.write_scalar(
                    dest,
                    Scalar::Bits {
                        bits: len as u128,
                        defined,
                    },
                    dest_ty,
                )?;
            }

            Ref(_, _, ref place) => {
                let src = self.eval_place(place)?;
                // We ignore the alignment of the place here -- special handling for packed structs ends
                // at the `&` operator.
                let (ptr, _align, extra) = self.force_allocation(src)?.to_ptr_align_extra();

                let val = match extra {
                    PlaceExtra::None => ptr.to_value(),
                    PlaceExtra::Length(len) => ptr.to_value_with_len(len, self.tcx.tcx),
                    PlaceExtra::Vtable(vtable) => ptr.to_value_with_vtable(vtable),
                    PlaceExtra::DowncastVariant(..) => {
                        bug!("attempted to take a reference to an enum downcast place")
                    }
                };
                let valty = ValTy {
                    value: val,
                    ty: dest_ty,
                };
                self.write_value(valty, dest)?;
            }

            NullaryOp(mir::NullOp::Box, ty) => {
                let ty = self.monomorphize(ty, self.substs());
                M::box_alloc(self, ty, dest)?;
            }

            NullaryOp(mir::NullOp::SizeOf, ty) => {
                let ty = self.monomorphize(ty, self.substs());
                let layout = self.layout_of(ty)?;
                assert!(!layout.is_unsized(),
                        "SizeOf nullary MIR operator called for unsized type");
                let defined = self.memory.pointer_size().bits() as u8;
                self.write_scalar(
                    dest,
                    Scalar::Bits {
                        bits: layout.size.bytes() as u128,
                        defined,
                    },
                    dest_ty,
                )?;
            }

            Cast(kind, ref operand, cast_ty) => {
                debug_assert_eq!(self.monomorphize(cast_ty, self.substs()), dest_ty);
                let src = self.eval_operand(operand)?;
                self.cast(src, kind, dest_ty, dest)?;
            }

            Discriminant(ref place) => {
                let ty = self.place_ty(place);
                let layout = self.layout_of(ty)?;
                let place = self.eval_place(place)?;
                let discr_val = self.read_discriminant_value(place, layout)?;
                let defined = self.layout_of(dest_ty).unwrap().size.bits() as u8;
                self.write_scalar(dest, Scalar::Bits {
                    bits: discr_val,
                    defined,
                }, dest_ty)?;
            }
        }

        if log_enabled!(::log::Level::Trace) {
            self.dump_local(dest);
        }

        Ok(())
    }
    pub(super) fn type_is_fat_ptr(&self, ty: Ty<'tcx>) -> bool {
        match ty.sty {
            ty::TyRawPtr(ty::TypeAndMut { ty, .. }) |
            ty::TyRef(_, ty, _) => !self.type_is_sized(ty),
            ty::TyAdt(def, _) if def.is_box() => !self.type_is_sized(ty.boxed_ty()),
            _ => false,
        }
    }
    pub(super) fn eval_operand_to_scalar(
        &mut self,
        op: &mir::Operand<'tcx>,
    ) -> EvalResult<'tcx, Scalar> {
        let valty = self.eval_operand(op)?;
        self.value_to_scalar(valty)
    }

    pub(crate) fn operands_to_args(
        &mut self,
        ops: &[mir::Operand<'tcx>],
    ) -> EvalResult<'tcx, Vec<ValTy<'tcx>>> {
        ops.into_iter()
            .map(|op| self.eval_operand(op))
            .collect()
    }
    pub fn eval_operand(&mut self, op: &mir::Operand<'tcx>) -> EvalResult<'tcx, ValTy<'tcx>> {
        use rustc::mir::Operand::*;
        let ty = self.monomorphize(op.ty(self.mir(), *self.tcx), self.substs());
        match *op {
            // FIXME: do some more logic on `move` to invalidate the old location
            Copy(ref place) |
            Move(ref place) => {
                Ok(ValTy {
                    value: self.eval_and_read_place(place)?,
                    ty,
                })
            }

            Constant(ref constant) => {
                let value = self.const_to_value(constant.literal.val)?;

                Ok(ValTy {
                    value,
                    ty,
                })
            }
        }
    }
    /// Reads a tag and produces the corresponding variant index.
    pub fn read_discriminant_as_variant_index(
        &mut self,
        place: Place,
        layout: TyLayout<'tcx>,
    ) -> EvalResult<'tcx, usize> {
        match layout.variants {
            ty::layout::Variants::Single { index } => Ok(index),
            ty::layout::Variants::Tagged { .. } => {
                let discr_val = self.read_discriminant_value(place, layout)?;
                layout.ty
                    .ty_adt_def()
                    .expect("tagged layout for non adt")
                    .discriminants(self.tcx.tcx)
                    .position(|var| var.val == discr_val)
                    .ok_or_else(|| EvalErrorKind::InvalidDiscriminant.into())
            }
            ty::layout::Variants::NicheFilling { .. } => {
                let discr_val = self.read_discriminant_value(place, layout)?;
                assert_eq!(discr_val as usize as u128, discr_val);
                Ok(discr_val as usize)
            }
        }
    }
    pub fn read_discriminant_value(
        &mut self,
        place: Place,
        layout: TyLayout<'tcx>,
    ) -> EvalResult<'tcx, u128> {
        trace!("read_discriminant_value {:#?}", layout);
        if layout.abi == layout::Abi::Uninhabited {
            return Ok(0);
        }

        match layout.variants {
            layout::Variants::Single { index } => {
                let discr_val = layout.ty.ty_adt_def().map_or(
                    index as u128,
                    |def| def.discriminant_for_variant(*self.tcx, index).val);
                return Ok(discr_val);
            }
            layout::Variants::Tagged { .. } |
            layout::Variants::NicheFilling { .. } => {},
        }

        let discr_place_val = self.read_place(place)?;
        let (discr_val, discr) = self.read_field(discr_place_val, None, mir::Field::new(0), layout)?;
        trace!("discr value: {:?}, {:?}", discr_val, discr);
        let raw_discr = self.value_to_scalar(ValTy {
            value: discr_val,
            ty: discr.ty,
        })?;
        let discr_val = match layout.variants {
            layout::Variants::Single { .. } => bug!(),
            // FIXME: should we catch invalid discriminants here?
            layout::Variants::Tagged { .. } => {
                if discr.ty.is_signed() {
                    let i = raw_discr.to_bits(discr.size)? as i128;
                    // going from layout tag type to typeck discriminant type
                    // requires first sign extending with the layout discriminant
                    let shift = 128 - discr.size.bits();
                    let sexted = (i << shift) >> shift;
                    // and then zeroing with the typeck discriminant type
                    let discr_ty = layout
                        .ty
                        .ty_adt_def().expect("tagged layout corresponds to adt")
                        .repr
                        .discr_type();
                    let discr_ty = layout::Integer::from_attr(self.tcx.tcx, discr_ty);
                    let shift = 128 - discr_ty.size().bits();
                    let truncatee = sexted as u128;
                    (truncatee << shift) >> shift
                } else {
                    raw_discr.to_bits(discr.size)?
                }
            },
            layout::Variants::NicheFilling {
                dataful_variant,
                ref niche_variants,
                niche_start,
                ..
            } => {
                let variants_start = *niche_variants.start() as u128;
                let variants_end = *niche_variants.end() as u128;
                match raw_discr {
                    Scalar::Ptr(_) => {
                        assert!(niche_start == 0);
                        assert!(variants_start == variants_end);
                        dataful_variant as u128
                    },
                    Scalar::Bits { bits: raw_discr, defined } => {
                        if defined < discr.size.bits() as u8 {
                            return err!(ReadUndefBytes);
                        }
                        let discr = raw_discr.wrapping_sub(niche_start)
                            .wrapping_add(variants_start);
                        if variants_start <= discr && discr <= variants_end {
                            discr
                        } else {
                            dataful_variant as u128
                        }
                    },
                }
            }
        };

        Ok(discr_val)
    }
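    // Worked examples for the two decodings above (layouts assumed):
    //
    // * Tagged: for `#[repr(i16)] enum E { A = -1 }` stored with a 1-byte tag,
    //   the raw byte 0xFF is sign-extended to an all-ones i128 and then
    //   truncated to the 16-bit discriminant type, yielding 0xFFFF.
    //
    // * NicheFilling: for `Option<&T>` with `niche_start = 0`, the bit
    //   pattern 0 lands in the niche variant range (`None`), while any
    //   non-null pointer decodes to the dataful variant (`Some`).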
    pub fn write_discriminant_value(
        &mut self,
        dest_ty: Ty<'tcx>,
        dest: Place,
        variant_index: usize,
    ) -> EvalResult<'tcx> {
        let layout = self.layout_of(dest_ty)?;

        match layout.variants {
            layout::Variants::Single { index } => {
                if index != variant_index {
                    // If the layout of an enum is `Single`, all
                    // other variants are necessarily uninhabited.
                    assert_eq!(layout.for_variant(&self, variant_index).abi,
                               layout::Abi::Uninhabited);
                }
            }
            layout::Variants::Tagged { ref tag, .. } => {
                let discr_val = dest_ty.ty_adt_def().unwrap()
                    .discriminant_for_variant(*self.tcx, variant_index)
                    .val;

                // raw discriminants for enums are isize or bigger during
                // their computation, but the in-memory tag is the smallest possible
                // representation
                let size = tag.value.size(self.tcx.tcx).bits();
                let shift = 128 - size;
                let discr_val = (discr_val << shift) >> shift;

                let (discr_dest, tag) = self.place_field(dest, mir::Field::new(0), layout)?;
                self.write_scalar(discr_dest, Scalar::Bits {
                    bits: discr_val,
                    defined: size as u8,
                }, tag.ty)?;
            }
            layout::Variants::NicheFilling {
                dataful_variant,
                ref niche_variants,
                niche_start,
                ..
            } => {
                if variant_index != dataful_variant {
                    let (niche_dest, niche) =
                        self.place_field(dest, mir::Field::new(0), layout)?;
                    let niche_value = ((variant_index - niche_variants.start()) as u128)
                        .wrapping_add(niche_start);
                    self.write_scalar(niche_dest, Scalar::Bits {
                        bits: niche_value,
                        defined: niche.size.bits() as u8,
                    }, niche.ty)?;
                }
            }
        }

        Ok(())
    }
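    // Mirror image of the decoding example above (layout values assumed):
    // writing niche variant k stores `(k - niche_variants.start()) + niche_start`,
    // exactly the value that `read_discriminant_value` undoes with its
    // wrapping subtraction.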
    pub fn read_global_as_value(&mut self, gid: GlobalId<'tcx>) -> EvalResult<'tcx, Value> {
        let cv = self.const_eval(gid)?;
        self.const_to_value(cv.val)
    }

    pub fn const_eval(&self, gid: GlobalId<'tcx>) -> EvalResult<'tcx, &'tcx ty::Const<'tcx>> {
        let param_env = if self.tcx.is_static(gid.instance.def_id()).is_some() {
            ty::ParamEnv::reveal_all()
        } else {
            self.param_env
        };
        self.tcx.const_eval(param_env.and(gid)).map_err(|err| EvalErrorKind::ReferencedConstant(err).into())
    }
    pub fn allocate_place_for_value(
        &mut self,
        value: Value,
        layout: TyLayout<'tcx>,
        variant: Option<usize>,
    ) -> EvalResult<'tcx, Place> {
        let (ptr, align) = match value {
            Value::ByRef(ptr, align) => (ptr, align),
            Value::ScalarPair(..) | Value::Scalar(_) => {
                let ptr = self.alloc_ptr(layout)?.into();
                self.write_value_to_ptr(value, ptr, layout.align, layout.ty)?;
                (ptr, layout.align)
            },
        };
        Ok(Place::Ptr {
            ptr,
            align,
            extra: variant.map_or(PlaceExtra::None, PlaceExtra::DowncastVariant),
        })
    }
    pub fn force_allocation(&mut self, place: Place) -> EvalResult<'tcx, Place> {
        let new_place = match place {
            Place::Local { frame, local } => {
                match self.stack[frame].locals[local] {
                    None => return err!(DeadLocal),
                    Some(Value::ByRef(ptr, align)) => {
                        Place::Ptr {
                            ptr,
                            align,
                            extra: PlaceExtra::None,
                        }
                    }
                    Some(val) => {
                        let ty = self.stack[frame].mir.local_decls[local].ty;
                        let ty = self.monomorphize(ty, self.stack[frame].instance.substs);
                        let layout = self.layout_of(ty)?;
                        let ptr = self.alloc_ptr(layout)?;
                        self.stack[frame].locals[local] =
                            Some(Value::ByRef(ptr.into(), layout.align)); // it stays live
                        let place = Place::from_ptr(ptr, layout.align);
                        self.write_value(ValTy { value: val, ty }, place)?;
                        place
                    }
                }
            }
            Place::Ptr { .. } => place,
        };
        Ok(new_place)
    }
    /// Ensures this Value is not a `ByRef`.
    pub fn follow_by_ref_value(
        &self,
        value: Value,
        ty: Ty<'tcx>,
    ) -> EvalResult<'tcx, Value> {
        match value {
            Value::ByRef(ptr, align) => {
                self.read_value(ptr, align, ty)
            }
            other => Ok(other),
        }
    }

    pub fn value_to_scalar(
        &self,
        ValTy { value, ty } : ValTy<'tcx>,
    ) -> EvalResult<'tcx, Scalar> {
        match self.follow_by_ref_value(value, ty)? {
            Value::ByRef { .. } => bug!("follow_by_ref_value can't result in `ByRef`"),

            Value::Scalar(scalar) => Ok(scalar),

            Value::ScalarPair(..) => bug!("value_to_scalar can't work with fat pointers"),
        }
    }
    pub fn write_ptr(&mut self, dest: Place, val: Scalar, dest_ty: Ty<'tcx>) -> EvalResult<'tcx> {
        let valty = ValTy {
            value: val.to_value(),
            ty: dest_ty,
        };
        self.write_value(valty, dest)
    }

    pub fn write_scalar(
        &mut self,
        dest: Place,
        val: Scalar,
        dest_ty: Ty<'tcx>,
    ) -> EvalResult<'tcx> {
        let valty = ValTy {
            value: Value::Scalar(val),
            ty: dest_ty,
        };
        self.write_value(valty, dest)
    }

    pub fn write_value(
        &mut self,
        ValTy { value: src_val, ty: dest_ty } : ValTy<'tcx>,
        dest: Place,
    ) -> EvalResult<'tcx> {
        //trace!("Writing {:?} to {:?} at type {:?}", src_val, dest, dest_ty);
        // Note that it is really important that the type here is the right one, and matches the type things are read at.
        // In case `src_val` is a `ScalarPair`, we don't do any magic here to handle padding properly, which is only
        // correct if we never look at this data with the wrong type.

        match dest {
            Place::Ptr { ptr, align, extra } => {
                assert_eq!(extra, PlaceExtra::None);
                self.write_value_to_ptr(src_val, ptr, align, dest_ty)
            }

            Place::Local { frame, local } => {
                let dest = self.stack[frame].get_local(local)?;
                self.write_value_possibly_by_val(
                    src_val,
                    |this, val| this.stack[frame].set_local(local, val),
                    dest,
                    dest_ty,
                )
            }
        }
    }
    // The cases here can be a bit subtle. Read carefully!
    fn write_value_possibly_by_val<F: FnOnce(&mut Self, Value) -> EvalResult<'tcx>>(
        &mut self,
        src_val: Value,
        write_dest: F,
        old_dest_val: Value,
        dest_ty: Ty<'tcx>,
    ) -> EvalResult<'tcx> {
        if let Value::ByRef(dest_ptr, align) = old_dest_val {
            // If the value is already `ByRef` (that is, backed by an `Allocation`),
            // then we must write the new value into this allocation, because there may be
            // other pointers into the allocation. These other pointers are logically
            // pointers into the local variable, and must be able to observe the change.
            //
            // Thus, it would be an error to replace the `ByRef` with a `ByVal`, unless we
            // knew for certain that there were no outstanding pointers to this allocation.
            self.write_value_to_ptr(src_val, dest_ptr, align, dest_ty)?;
        } else if let Value::ByRef(src_ptr, align) = src_val {
            // If the value is not `ByRef`, then we know there are no pointers to it
            // and we can simply overwrite the `Value` in the locals array directly.
            //
            // In this specific case, where the source value is `ByRef`, we must duplicate
            // the allocation, because this is a by-value operation. It would be incorrect
            // if they referred to the same allocation, since then a change to one would
            // implicitly change the other.
            //
            // It is a valid optimization to attempt reading a primitive value out of the
            // source and write that into the destination without making an allocation, so
            // we do so here.
            if let Ok(Some(src_val)) = self.try_read_value(src_ptr, align, dest_ty) {
                write_dest(self, src_val)?;
            } else {
                let layout = self.layout_of(dest_ty)?;
                let dest_ptr = self.alloc_ptr(layout)?.into();
                self.memory.copy(src_ptr, align.min(layout.align), dest_ptr, layout.align, layout.size, false)?;
                write_dest(self, Value::ByRef(dest_ptr, layout.align))?;
            }
        } else {
            // Finally, we have the simple case where neither source nor destination are
            // `ByRef`. We may simply copy the source value over to the destination.
            write_dest(self, src_val)?;
        }
        Ok(())
    }
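    // Illustration of the aliasing concern above (hypothetical scenario):
    // if a local `x` is `ByRef` and a reference to `x` escaped earlier, then
    // an assignment to `x` must mutate the backing allocation in place;
    // replacing the local with a by-value `Scalar` would leave the old
    // allocation, and every outstanding pointer into it, observing a stale
    // value.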
    pub fn write_value_to_ptr(
        &mut self,
        value: Value,
        dest: Scalar,
        dest_align: Align,
        dest_ty: Ty<'tcx>,
    ) -> EvalResult<'tcx> {
        let layout = self.layout_of(dest_ty)?;
        trace!("write_value_to_ptr: {:#?}, {}, {:#?}", value, dest_ty, layout);
        match value {
            Value::ByRef(ptr, align) => {
                self.memory.copy(ptr, align.min(layout.align), dest, dest_align.min(layout.align), layout.size, false)
            }
            Value::Scalar(scalar) => {
                let signed = match layout.abi {
                    layout::Abi::Scalar(ref scal) => match scal.value {
                        layout::Primitive::Int(_, signed) => signed,
                        _ => false,
                    },
                    _ => match scalar {
                        Scalar::Bits { defined: 0, .. } => false,
                        _ => bug!("write_value_to_ptr: invalid ByVal layout: {:#?}", layout),
                    }
                };
                self.memory.write_scalar(dest, dest_align, scalar, layout.size, signed)
            }
            Value::ScalarPair(a_val, b_val) => {
                trace!("write_value_to_ptr valpair: {:#?}", layout);
                let (a, b) = match layout.abi {
                    layout::Abi::ScalarPair(ref a, ref b) => (&a.value, &b.value),
                    _ => bug!("write_value_to_ptr: invalid ScalarPair layout: {:#?}", layout)
                };
                let (a_size, b_size) = (a.size(&self), b.size(&self));
                let a_ptr = dest;
                let b_offset = a_size.abi_align(b.align(&self));
                let b_ptr = dest.ptr_offset(b_offset, &self)?.into();
                // TODO: What about signedness?
                self.memory.write_scalar(a_ptr, dest_align, a_val, a_size, false)?;
                self.memory.write_scalar(b_ptr, dest_align, b_val, b_size, false)
            }
        }
    }
    pub fn read_value(&self, ptr: Scalar, align: Align, ty: Ty<'tcx>) -> EvalResult<'tcx, Value> {
        if let Some(val) = self.try_read_value(ptr, align, ty)? {
            Ok(val)
        } else {
            bug!("primitive read failed for type: {:?}", ty);
        }
    }
    pub(crate) fn read_ptr(
        &self,
        ptr: Pointer,
        ptr_align: Align,
        pointee_ty: Ty<'tcx>,
    ) -> EvalResult<'tcx, Value> {
        let ptr_size = self.memory.pointer_size();
        let p: Scalar = self.memory.read_ptr_sized(ptr, ptr_align)?.into();
        if self.type_is_sized(pointee_ty) {
            Ok(p.to_value())
        } else {
            trace!("reading fat pointer extra of type {}", pointee_ty);
            let extra = ptr.offset(ptr_size, self)?;
            match self.tcx.struct_tail(pointee_ty).sty {
                ty::TyDynamic(..) => Ok(p.to_value_with_vtable(
                    self.memory.read_ptr_sized(extra, ptr_align)?.to_ptr()?,
                )),
                ty::TySlice(..) | ty::TyStr => {
                    let len = self
                        .memory
                        .read_ptr_sized(extra, ptr_align)?
                        .to_bits(ptr_size)?;
                    Ok(p.to_value_with_len(len as u64, self.tcx.tcx))
                }
                _ => bug!("unsized scalar ptr read from {:?}", pointee_ty),
            }
        }
    }
    fn validate_scalar(
        &self,
        value: Scalar,
        size: Size,
        scalar: &layout::Scalar,
        path: &str,
        ty: Ty<'tcx>,
    ) -> EvalResult<'tcx> {
        trace!("validate scalar: {:#?}, {:#?}, {:#?}, {}", value, size, scalar, ty);
        let (lo, hi) = scalar.valid_range.clone().into_inner();

        let (bits, defined) = match value {
            Scalar::Bits { bits, defined } => (bits, defined),
            Scalar::Ptr(_) => {
                let ptr_size = self.memory.pointer_size();
                let ptr_max = u128::max_value() >> (128 - ptr_size.bits());
                return if lo > hi {
                    if lo - hi == 1 {
                        // no gap, all values are ok
                        Ok(())
                    } else if hi < ptr_max || lo > 1 {
                        let max = u128::max_value() >> (128 - size.bits());
                        validation_failure!(
                            "pointer",
                            path,
                            format!("something in the range {:?} or {:?}", 0..=lo, hi..=max)
                        )
                    } else {
                        Ok(())
                    }
                } else if hi < ptr_max || lo > 1 {
                    validation_failure!(
                        "pointer",
                        path,
                        format!("something in the range {:?}", scalar.valid_range)
                    )
                } else {
                    Ok(())
                };
            },
        };

        // char gets a special treatment, because its number space is not contiguous so `TyLayout`
        // has no special checks for chars
        match ty.sty {
            ty::TyChar => {
                assert_eq!(size.bytes(), 4);
                if ::std::char::from_u32(bits as u32).is_none() {
                    return err!(InvalidChar(bits));
                }
            }
            _ => {},
        }

        use std::ops::RangeInclusive;
        let in_range = |bound: RangeInclusive<u128>| {
            defined as u64 >= size.bits() && bound.contains(&bits)
        };
        if lo > hi {
            if in_range(0..=hi) || in_range(lo..=u128::max_value()) {
                Ok(())
            } else if defined as u64 >= size.bits() {
                validation_failure!(
                    bits,
                    path,
                    format!("something in the range {:?} or {:?}", ..=hi, lo..)
                )
            } else {
                validation_failure!("undefined bytes", path)
            }
        } else {
            if in_range(scalar.valid_range.clone()) {
                Ok(())
            } else if defined as u64 >= size.bits() {
                validation_failure!(
                    bits,
                    path,
                    format!("something in the range {:?}", scalar.valid_range)
                )
            } else {
                validation_failure!("undefined bytes", path)
            }
        }
    }
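    // Concrete instances of the checks above (ranges come from the layout;
    // examples assumed): `bool` has valid_range 0..=1, so bits = 3 fails;
    // `NonZeroU8` has valid_range 1..=255, so bits = 0 fails; and a wrapping
    // range with lo > hi is read as "in lo..=MAX or in 0..=hi".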
    /// This function checks the memory where `ptr` points to.
    /// It will error if the bits at the destination do not match the ones described by the layout.
    pub fn validate_ptr_target(
        &mut self,
        ptr: Pointer,
        ptr_align: Align,
        mut layout: TyLayout<'tcx>,
        path: String,
        seen: &mut FxHashSet<(Pointer, Ty<'tcx>)>,
        todo: &mut Vec<(Pointer, Ty<'tcx>, String)>,
    ) -> EvalResult<'tcx> {
        self.memory.dump_alloc(ptr.alloc_id);
        trace!("validate_ptr_target: {:?}, {:#?}", ptr, layout);

        let variant;
        match layout.variants {
            layout::Variants::NicheFilling { niche: ref tag, .. } |
            layout::Variants::Tagged { ref tag, .. } => {
                let size = tag.value.size(self);
                let (tag_value, tag_layout) = self.read_field(
                    Value::ByRef(ptr.into(), ptr_align),
                    None,
                    mir::Field::new(0),
                    layout,
                )?;
                let tag_value = self.value_to_scalar(ValTy {
                    value: tag_value,
                    ty: tag_layout.ty,
                })?;
                let path = format!("{}.TAG", path);
                self.validate_scalar(tag_value, size, tag, &path, tag_layout.ty)?;
                let variant_index = self.read_discriminant_as_variant_index(
                    Place::from_ptr(ptr, ptr_align),
                    layout,
                )?;
                variant = variant_index;
                layout = layout.for_variant(self, variant_index);
                trace!("variant layout: {:#?}", layout);
            },
            layout::Variants::Single { index } => variant = index,
        }
        match layout.fields {
            // primitives are unions with zero fields
            layout::FieldPlacement::Union(0) => {
                match layout.abi {
                    // nothing to do, whatever the pointer points to, it is never going to be read
                    layout::Abi::Uninhabited => validation_failure!("a value of an uninhabited type", path),
                    // check that the scalar is a valid pointer or that its bit range matches the
                    // expectation
                    layout::Abi::Scalar(ref scalar) => {
                        let size = scalar.value.size(self);
                        let value = self.memory.read_scalar(ptr, ptr_align, size)?;
                        self.validate_scalar(value, size, scalar, &path, layout.ty)?;
                        if scalar.value == Primitive::Pointer {
                            // ignore integer pointers, we can't reason about the final hardware
                            if let Scalar::Ptr(ptr) = value {
                                let alloc_kind = self.tcx.alloc_map.lock().get(ptr.alloc_id);
                                if let Some(AllocType::Static(did)) = alloc_kind {
                                    // statics from other crates are already checked
                                    // extern statics should not be validated as they have no body
                                    if !did.is_local() || self.tcx.is_foreign_item(did) {
                                        return Ok(());
                                    }
                                }
                                if let Some(tam) = layout.ty.builtin_deref(false) {
                                    // we have not encountered this pointer+layout combination before
                                    if seen.insert((ptr, tam.ty)) {
                                        todo.push((ptr, tam.ty, format!("(*{})", path)))
                                    }
                                }
                            }
                        }
                        Ok(())
                    },
                    _ => bug!("bad abi for FieldPlacement::Union(0): {:#?}", layout.abi),
                }
            }
            layout::FieldPlacement::Union(_) => {
                // We can't check unions, their bits are allowed to be anything.
                // The fields don't need to correspond to any bit pattern of the union's fields.
                // See https://github.com/rust-lang/rust/issues/32836#issuecomment-406875389
                Ok(())
            },
            layout::FieldPlacement::Array { stride, count } => {
                let elem_layout = layout.field(self, 0)?;
                for i in 0..count {
                    let mut path = path.clone();
                    self.write_field_name(&mut path, layout.ty, i as usize, variant).unwrap();
                    self.validate_ptr_target(ptr.offset(stride * i, self)?, ptr_align, elem_layout, path, seen, todo)?;
                }
                Ok(())
            },
            layout::FieldPlacement::Arbitrary { ref offsets, .. } => {
                // fat pointers need special treatment:
                // check length field and vtable field
                match layout.ty.builtin_deref(false).map(|tam| &tam.ty.sty) {
                    | Some(ty::TyStr)
                    | Some(ty::TySlice(_)) => {
                        let (len, len_layout) = self.read_field(
                            Value::ByRef(ptr.into(), ptr_align),
                            None,
                            mir::Field::new(1),
                            layout,
                        )?;
                        let len = self.value_to_scalar(ValTy { value: len, ty: len_layout.ty })?;
                        if len.to_bits(len_layout.size).is_err() {
                            return validation_failure!("length is not a valid integer", path);
                        }
                    },
                    Some(ty::TyDynamic(..)) => {
                        let (vtable, vtable_layout) = self.read_field(
                            Value::ByRef(ptr.into(), ptr_align),
                            None,
                            mir::Field::new(1),
                            layout,
                        )?;
                        let vtable = self.value_to_scalar(ValTy { value: vtable, ty: vtable_layout.ty })?;
                        if vtable.to_ptr().is_err() {
                            return validation_failure!("vtable address is not a pointer", path);
                        }
                    }
                    _ => {},
                }
                for (i, &offset) in offsets.iter().enumerate() {
                    let field_layout = layout.field(self, i)?;
                    let mut path = path.clone();
                    self.write_field_name(&mut path, layout.ty, i, variant).unwrap();
                    self.validate_ptr_target(ptr.offset(offset, self)?, ptr_align, field_layout, path, seen, todo)?;
                }
                Ok(())
            }
        }
    }
    pub fn try_read_by_ref(&self, mut val: Value, ty: Ty<'tcx>) -> EvalResult<'tcx, Value> {
        // Convert to ByVal or ScalarPair if possible
        if let Value::ByRef(ptr, align) = val {
            if let Some(read_val) = self.try_read_value(ptr, align, ty)? {
                val = read_val;
            }
        }
        Ok(val)
    }

    pub fn try_read_value(&self, ptr: Scalar, ptr_align: Align, ty: Ty<'tcx>) -> EvalResult<'tcx, Option<Value>> {
        let layout = self.layout_of(ty)?;
        self.memory.check_align(ptr, ptr_align)?;

        if layout.size.bytes() == 0 {
            return Ok(Some(Value::Scalar(Scalar::undef())));
        }

        let ptr = ptr.to_ptr()?;

        match layout.abi {
            layout::Abi::Scalar(..) => {
                let scalar = self.memory.read_scalar(ptr, ptr_align, layout.size)?;
                Ok(Some(Value::Scalar(scalar)))
            }
            layout::Abi::ScalarPair(ref a, ref b) => {
                let (a, b) = (&a.value, &b.value);
                let (a_size, b_size) = (a.size(self), b.size(self));
                let a_ptr = ptr;
                let b_offset = a_size.abi_align(b.align(self));
                let b_ptr = ptr.offset(b_offset, self)?.into();
                let a_val = self.memory.read_scalar(a_ptr, ptr_align, a_size)?;
                let b_val = self.memory.read_scalar(b_ptr, ptr_align, b_size)?;
                Ok(Some(Value::ScalarPair(a_val, b_val)))
            }
            _ => Ok(None),
        }
    }
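    // Example of the ScalarPair offset math above (types assumed): for a
    // pair of a 1-byte scalar and a 4-byte-aligned scalar, b_offset is
    // Size::from_bytes(1) aligned up to 4, so the second component is read
    // 4 bytes past the first.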
    pub fn frame(&self) -> &Frame<'mir, 'tcx> {
        self.stack.last().expect("no call frames exist")
    }

    pub fn frame_mut(&mut self) -> &mut Frame<'mir, 'tcx> {
        self.stack.last_mut().expect("no call frames exist")
    }

    pub(super) fn mir(&self) -> &'mir mir::Mir<'tcx> {
        self.frame().mir
    }

    pub fn substs(&self) -> &'tcx Substs<'tcx> {
        if let Some(frame) = self.stack.last() {
            frame.instance.substs
        } else {
            Substs::empty()
        }
    }

    fn unsize_into_ptr(
        &mut self,
        src: Value,
        src_ty: Ty<'tcx>,
        dest: Place,
        dest_ty: Ty<'tcx>,
        sty: Ty<'tcx>,
        dty: Ty<'tcx>,
    ) -> EvalResult<'tcx> {
        // A<Struct> -> A<Trait> conversion
        let (src_pointee_ty, dest_pointee_ty) = self.tcx.struct_lockstep_tails(sty, dty);

        match (&src_pointee_ty.sty, &dest_pointee_ty.sty) {
            (&ty::TyArray(_, length), &ty::TySlice(_)) => {
                let ptr = self.into_ptr(src)?;
                // u64 cast is from usize to u64, which is always good
                let valty = ValTy {
                    value: ptr.to_value_with_len(length.unwrap_usize(self.tcx.tcx), self.tcx.tcx),
                    ty: dest_ty,
                };
                self.write_value(valty, dest)
            }
            (&ty::TyDynamic(..), &ty::TyDynamic(..)) => {
                // For now, upcasts are limited to changes in marker
                // traits, and hence never actually require a change
                // to the vtable.
                let valty = ValTy {
                    value: src,
                    ty: dest_ty,
                };
                self.write_value(valty, dest)
            }
            (_, &ty::TyDynamic(ref data, _)) => {
                let trait_ref = data.principal().unwrap().with_self_ty(
                    *self.tcx,
                    src_pointee_ty,
                );
                let trait_ref = self.tcx.erase_regions(&trait_ref);
                let vtable = self.get_vtable(src_pointee_ty, trait_ref)?;
                let ptr = self.into_ptr(src)?;
                let valty = ValTy {
                    value: ptr.to_value_with_vtable(vtable),
                    ty: dest_ty,
                };
                self.write_value(valty, dest)
            }

            _ => bug!("invalid unsizing {:?} -> {:?}", src_ty, dest_ty),
        }
    }
    crate fn unsize_into(
        &mut self,
        src: Value,
        src_layout: TyLayout<'tcx>,
        dst: Place,
        dst_layout: TyLayout<'tcx>,
    ) -> EvalResult<'tcx> {
        match (&src_layout.ty.sty, &dst_layout.ty.sty) {
            (&ty::TyRef(_, s, _), &ty::TyRef(_, d, _)) |
            (&ty::TyRef(_, s, _), &ty::TyRawPtr(TypeAndMut { ty: d, .. })) |
            (&ty::TyRawPtr(TypeAndMut { ty: s, .. }),
             &ty::TyRawPtr(TypeAndMut { ty: d, .. })) => {
                self.unsize_into_ptr(src, src_layout.ty, dst, dst_layout.ty, s, d)
            }
            (&ty::TyAdt(def_a, _), &ty::TyAdt(def_b, _)) => {
                assert_eq!(def_a, def_b);
                if def_a.is_box() || def_b.is_box() {
                    if !def_a.is_box() || !def_b.is_box() {
                        bug!("invalid unsizing between {:?} -> {:?}", src_layout, dst_layout);
                    }
                    return self.unsize_into_ptr(
                        src,
                        src_layout.ty,
                        dst,
                        dst_layout.ty,
                        src_layout.ty.boxed_ty(),
                        dst_layout.ty.boxed_ty(),
                    );
                }

                // unsizing of generic struct with pointer fields
                // Example: `Arc<T>` -> `Arc<Trait>`
                // here we need to increase the size of every &T thin ptr field to a fat ptr
                for i in 0..src_layout.fields.count() {
                    let (dst_f_place, dst_field) =
                        self.place_field(dst, mir::Field::new(i), dst_layout)?;
                    if dst_field.is_zst() {
                        continue;
                    }
                    let (src_f_value, src_field) = match src {
                        Value::ByRef(ptr, align) => {
                            let src_place = Place::from_scalar_ptr(ptr, align);
                            let (src_f_place, src_field) =
                                self.place_field(src_place, mir::Field::new(i), src_layout)?;
                            (self.read_place(src_f_place)?, src_field)
                        }
                        Value::Scalar(_) | Value::ScalarPair(..) => {
                            let src_field = src_layout.field(&self, i)?;
                            assert_eq!(src_layout.fields.offset(i).bytes(), 0);
                            assert_eq!(src_field.size, src_layout.size);
                            (src, src_field)
                        }
                    };
                    if src_field.ty == dst_field.ty {
                        self.write_value(ValTy {
                            value: src_f_value,
                            ty: src_field.ty,
                        }, dst_f_place)?;
                    } else {
                        self.unsize_into(src_f_value, src_field, dst_f_place, dst_field)?;
                    }
                }
                Ok(())
            }
            _ => {
                bug!(
                    "unsize_into: invalid conversion: {:?} -> {:?}",
                    src_layout,
                    dst_layout
                )
            }
        }
    }
    pub fn dump_local(&self, place: Place) {
        // Debug output
        if !log_enabled!(::log::Level::Trace) {
            return;
        }
        match place {
            Place::Local { frame, local } => {
                let mut allocs = Vec::new();
                let mut msg = format!("{:?}", local);
                if frame != self.cur_frame() {
                    write!(msg, " ({} frames up)", self.cur_frame() - frame).unwrap();
                }
                write!(msg, ":").unwrap();

                match self.stack[frame].get_local(local) {
                    Err(err) => {
                        if let EvalErrorKind::DeadLocal = err.kind {
                            write!(msg, " is dead").unwrap();
                        } else {
                            panic!("Failed to access local: {:?}", err);
                        }
                    }
                    Ok(Value::ByRef(ptr, align)) => {
                        match ptr {
                            Scalar::Ptr(ptr) => {
                                write!(msg, " by align({}) ref:", align.abi()).unwrap();
                                allocs.push(ptr.alloc_id);
                            }
                            ptr => write!(msg, " integral by ref: {:?}", ptr).unwrap(),
                        }
                    }
                    Ok(Value::Scalar(val)) => {
                        write!(msg, " {:?}", val).unwrap();
                        if let Scalar::Ptr(ptr) = val {
                            allocs.push(ptr.alloc_id);
                        }
                    }
                    Ok(Value::ScalarPair(val1, val2)) => {
                        write!(msg, " ({:?}, {:?})", val1, val2).unwrap();
                        if let Scalar::Ptr(ptr) = val1 {
                            allocs.push(ptr.alloc_id);
                        }
                        if let Scalar::Ptr(ptr) = val2 {
                            allocs.push(ptr.alloc_id);
                        }
                    }
                }

                trace!("{}", msg);
                self.memory.dump_allocs(allocs);
            }
            Place::Ptr { ptr, align, .. } => {
                match ptr {
                    Scalar::Ptr(ptr) => {
                        trace!("by align({}) ref:", align.abi());
                        self.memory.dump_alloc(ptr.alloc_id);
                    }
                    ptr => trace!(" integral by ref: {:?}", ptr),
                }
            }
        }
    }
    /// Convenience function to ensure correct usage of locals.
    pub fn modify_local<F>(&mut self, frame: usize, local: mir::Local, f: F) -> EvalResult<'tcx>
    where
        F: FnOnce(&mut Self, Value) -> EvalResult<'tcx, Value>,
    {
        let val = self.stack[frame].get_local(local)?;
        let new_val = f(self, val)?;
        self.stack[frame].set_local(local, new_val)?;
        // FIXME(solson): Run this when setting to Undef? (See previous version of this code.)
        // if let Value::ByRef(ptr) = self.stack[frame].get_local(local) {
        //     self.memory.deallocate(ptr)?;
        // }
        Ok(())
    }
    pub fn generate_stacktrace(&self, explicit_span: Option<Span>) -> (Vec<FrameInfo>, Span) {
        let mut last_span = None;
        let mut frames = Vec::new();
        // skip 1 because the last frame is just the environment of the constant
        for &Frame { instance, span, mir, block, stmt, .. } in self.stack().iter().skip(1).rev() {
            // make sure we don't emit frames that are duplicates of the previous
            if explicit_span == Some(span) {
                last_span = Some(span);
                continue;
            }
            if let Some(last) = last_span {
                if last == span {
                    continue;
                }
            } else {
                last_span = Some(span);
            }
            let location = if self.tcx.def_key(instance.def_id()).disambiguated_data.data == DefPathData::ClosureExpr {
                "closure".to_owned()
            } else {
                instance.to_string()
            };
            let block = &mir.basic_blocks()[block];
            let source_info = if stmt < block.statements.len() {
                block.statements[stmt].source_info
            } else {
                block.terminator().source_info
            };
            let lint_root = match mir.source_scope_local_data {
                mir::ClearCrossCrate::Set(ref ivs) => Some(ivs[source_info.scope].lint_root),
                mir::ClearCrossCrate::Clear => None,
            };
            frames.push(FrameInfo { span, location, lint_root });
        }
        trace!("generate stacktrace: {:#?}, {:?}", frames, explicit_span);
        (frames, self.tcx.span)
    }
    pub fn sign_extend(&self, value: u128, ty: Ty<'tcx>) -> EvalResult<'tcx, u128> {
        super::sign_extend(self.tcx.tcx, value, ty)
    }

    pub fn truncate(&self, value: u128, ty: Ty<'tcx>) -> EvalResult<'tcx, u128> {
        super::truncate(self.tcx.tcx, value, ty)
    }
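    // Worked example for the helpers above: sign-extending the 8-bit value
    // 0xFF at type `i8` yields a `u128` of all ones (i.e. -1), and truncating
    // that back at type `i8` yields 0xFF again.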
    fn write_field_name(&self, s: &mut String, ty: Ty<'tcx>, i: usize, variant: usize) -> ::std::fmt::Result {
        match ty.sty {
            ty::TyBool |
            ty::TyChar |
            ty::TyInt(_) |
            ty::TyUint(_) |
            ty::TyFloat(_) |
            ty::TyFnPtr(_) |
            ty::TyNever |
            ty::TyFnDef(..) |
            ty::TyGeneratorWitness(..) |
            ty::TyForeign(..) |
            ty::TyDynamic(..) => {
                bug!("field_name({:?}): not applicable", ty)
            }

            // Potentially-fat pointers.
            ty::TyRef(_, pointee, _) |
            ty::TyRawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
                if i == 0 {
                    // Reuse the fat *T type as its own thin pointer data field.
                    // This provides information about e.g. DST struct pointees
                    // (which may have no non-DST form), and will work as long
                    // as the `Abi` or `FieldPlacement` is checked by users.
                    return write!(s, ".data_ptr");
                }

                match self.tcx.struct_tail(pointee).sty {
                    ty::TySlice(_) |
                    ty::TyStr => write!(s, ".len"),
                    ty::TyDynamic(..) => write!(s, ".vtable_ptr"),
                    _ => bug!("field_name({:?}): not applicable", ty)
                }
            }

            // Arrays and slices.
            ty::TyArray(..) |
            ty::TySlice(..) |
            ty::TyStr => write!(s, "[{}]", i),

            // Generators and closures.
            ty::TyClosure(def_id, _) | ty::TyGenerator(def_id, _, _) => {
                let node_id = self.tcx.hir.as_local_node_id(def_id).unwrap();
                let freevar = self.tcx.with_freevars(node_id, |fv| fv[i]);
                write!(s, ".upvar({})", self.tcx.hir.name(freevar.var_id()))
            }

            ty::TyTuple(_) => write!(s, ".{}", i),

            // Enums.
            ty::TyAdt(def, ..) if def.is_enum() => {
                let variant = &def.variants[variant];
                write!(s, ".{}::{}", variant.name, variant.fields[i].ident)
            }

            // Other ADTs.
            ty::TyAdt(def, _) => write!(s, ".{}", def.non_enum_variant().fields[i].ident),

            ty::TyProjection(_) | ty::TyAnon(..) | ty::TyParam(_) |
            ty::TyInfer(_) | ty::TyError => {
                bug!("write_field_name: unexpected type `{}`", ty)
            }
        }
    }
}
impl<'mir, 'tcx> Frame<'mir, 'tcx> {
    pub fn get_local(&self, local: mir::Local) -> EvalResult<'tcx, Value> {
        self.locals[local].ok_or_else(|| EvalErrorKind::DeadLocal.into())
    }

    fn set_local(&mut self, local: mir::Local, value: Value) -> EvalResult<'tcx> {
        match self.locals[local] {
            None => err!(DeadLocal),
            Some(ref mut local) => {
                *local = value;
                Ok(())
            }
        }
    }

    /// Returns the old value of the local.
    pub fn storage_live(&mut self, local: mir::Local) -> Option<Value> {
        trace!("{:?} is now live", local);

        // StorageLive *always* kills the value that's currently stored
        mem::replace(&mut self.locals[local], Some(Value::Scalar(Scalar::undef())))
    }

    /// Returns the old value of the local.
    pub fn storage_dead(&mut self, local: mir::Local) -> Option<Value> {
        trace!("{:?} is now dead", local);

        self.locals[local].take()
    }
}