1 use rustc::ty::{self, Ty, Instance};
2 use rustc::ty::layout::{Size, Align, LayoutOf};
3 use rustc::mir::interpret::{Scalar, Pointer, EvalResult, PointerArithmetic};
5 use super::{InterpretCx, InterpError, Machine, MemoryKind};
7 impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> InterpretCx<'a, 'mir, 'tcx, M> {
8 /// Creates a dynamic vtable for the given type and vtable origin. This is used only for
/// unsized coercions to trait objects (NOTE(review): sentence appears truncated in this
/// extraction — confirm against the full file).
11 /// The `trait_ref` encodes the erased self type. Hence if we are
12 /// making an object `Foo<Trait>` from a value of type `Foo<T>`, then
13 /// `trait_ref` would map `T:Trait`.
///
/// Returns a (machine-tagged) pointer to an immutable allocation holding the vtable.
/// Vtable layout written below: [drop_in_place, size, align, method_0, …], one
/// pointer-sized slot per entry (see the offsets 0, ptr_size, 2*ptr_size, (3+i)*ptr_size).
17 poly_trait_ref: Option<ty::PolyExistentialTraitRef<'tcx>>,
18 ) -> EvalResult<'tcx, Pointer<M::PointerTag>> {
19 trace!("get_vtable(trait_ref={:?})", poly_trait_ref);
// Regions are irrelevant for vtable identity; erase them so the cache key is canonical.
21 let (ty, poly_trait_ref) = self.tcx.erase_regions(&(ty, poly_trait_ref));
// Fast path: reuse a previously built vtable for this (type, trait) pair.
23 if let Some(&vtable) = self.vtables.get(&(ty, poly_trait_ref)) {
24 // This means we guarantee that there are no duplicate vtables, we will
25 // always use the same vtable for the same (Type, Trait) combination.
26 // That's not what happens in rustc, but emulating per-crate deduplication
27 // does not sound like it actually makes anything any better.
// Resolve the list of trait methods to put in the vtable. `None` (no trait ref)
// presumably yields an empty method list — the `else` arm is elided here; confirm.
31 let methods = if let Some(poly_trait_ref) = poly_trait_ref {
32 let trait_ref = poly_trait_ref.with_self_ty(*self.tcx, ty);
33 let trait_ref = self.tcx.erase_regions(&trait_ref);
35 self.tcx.vtable_methods(trait_ref)
// The concrete (unerased-self) type must be sized: its size/align go into the vtable.
40 let layout = self.layout_of(ty)?;
41 assert!(!layout.is_unsized(), "can't create a vtable for an unsized type");
42 let size = layout.size.bytes();
43 let align = layout.align.abi.bytes();
45 let ptr_size = self.pointer_size();
46 let ptr_align = self.tcx.data_layout.pointer_align.abi;
47 // /////////////////////////////////////////////////////////////////////////////////////////
48 // If you touch this code, be sure to also make the corresponding changes to
49 // `get_vtable` in rust_codegen_llvm/meth.rs
50 // /////////////////////////////////////////////////////////////////////////////////////////
// 3 header slots (drop_fn, size, align) plus one slot per method.
51 let vtable = self.memory.allocate(
52 ptr_size * (3 + methods.len() as u64),
// Slot 0: pointer to the drop-in-place shim for `ty`.
58 let drop = Instance::resolve_drop_in_place(*tcx, ty);
59 let drop = self.memory.create_fn_alloc(drop);
61 // no need to do any alignment checks on the memory accesses below, because we know the
62 // allocation is correctly aligned as we created it above. Also we're only offsetting by
63 // multiples of `ptr_align`, which means that it will stay aligned to `ptr_align`.
65 .get_mut(vtable.alloc_id)?
66 .write_ptr_sized(tcx, vtable, Scalar::Ptr(drop).into())?;
// Slot 1: the value's size in bytes, stored as a pointer-sized unsigned integer.
68 let size_ptr = vtable.offset(ptr_size, self)?;
70 .get_mut(size_ptr.alloc_id)?
71 .write_ptr_sized(tcx, size_ptr, Scalar::from_uint(size, ptr_size).into())?;
// Slot 2: the value's ABI alignment in bytes, same encoding as the size.
72 let align_ptr = vtable.offset(ptr_size * 2, self)?;
74 .get_mut(align_ptr.alloc_id)?
75 .write_ptr_sized(tcx, align_ptr, Scalar::from_uint(align, ptr_size).into())?;
// Slots 3..: one function pointer per resolvable trait method.
// `None` entries (non-object-safe / non-vtable methods) leave their slot untouched.
77 for (i, method) in methods.iter().enumerate() {
78 if let Some((def_id, substs)) = *method {
79 // resolve for vtable: insert shims where needed
80 let substs = self.subst_and_normalize_erasing_regions(substs)?;
// If the method cannot be resolved yet (still generic), bail with `TooGeneric`.
81 let instance = ty::Instance::resolve_for_vtable(
86 ).ok_or_else(|| InterpError::TooGeneric)?;
87 let fn_ptr = self.memory.create_fn_alloc(instance);
88 let method_ptr = vtable.offset(ptr_size * (3 + i as u64), self)?;
90 .get_mut(method_ptr.alloc_id)?
91 .write_ptr_sized(tcx, method_ptr, Scalar::Ptr(fn_ptr).into())?;
// Vtables are read-only after construction; freeze the allocation and cache it.
// The `is_none()` assert guards the dedup invariant documented above.
95 self.memory.mark_immutable(vtable.alloc_id)?;
96 assert!(self.vtables.insert((ty, poly_trait_ref), vtable).is_none());
101 /// Returns the drop fn instance as well as the actual dynamic type
/// behind a vtable pointer.
///
/// The drop function lives in the vtable's first pointer-sized slot; the dynamic
/// type is recovered from the drop shim's signature (`fn(*mut T)` — see below).
///
/// Errors if the pointer is misaligned, the allocation is gone, or the slot does
/// not hold a function pointer.
102 pub fn read_drop_type_from_vtable(
104 vtable: Pointer<M::PointerTag>,
105 ) -> EvalResult<'tcx, (ty::Instance<'tcx>, Ty<'tcx>)> {
106 // we don't care about the pointee type, we just want a pointer
107 self.memory.check_align(vtable.into(), self.tcx.data_layout.pointer_align.abi)?;
// Read slot 0 of the vtable: the function pointer of the drop shim.
108 let drop_fn = self.memory
109 .get(vtable.alloc_id)?
110 .read_ptr_sized(self, vtable)?
// Map the raw function pointer back to the `Instance` it was created from.
112 let drop_instance = self.memory.get_fn(drop_fn)?;
113 trace!("Found drop fn: {:?}", drop_instance);
// Normalize away late-bound regions so we can inspect the signature's inputs.
114 let fn_sig = drop_instance.ty(*self.tcx).fn_sig(*self.tcx);
115 let fn_sig = self.tcx.normalize_erasing_late_bound_regions(self.param_env, &fn_sig);
116 // the drop function takes *mut T where T is the type being dropped, so get that
117 let ty = fn_sig.inputs()[0].builtin_deref(true).unwrap().ty;
118 Ok((drop_instance, ty))
/// Reads the size and alignment of the dynamic type out of a vtable pointer.
///
/// These live in the second and third pointer-sized slots (offsets `ptr_size`
/// and `2 * ptr_size`), matching the layout written by `get_vtable`: both are
/// stored as pointer-sized unsigned integers counting bytes.
///
/// Errors if the pointer is misaligned, the allocation is gone, or a slot does
/// not hold a plain integer.
121 pub fn read_size_and_align_from_vtable(
123 vtable: Pointer<M::PointerTag>,
124 ) -> EvalResult<'tcx, (Size, Align)> {
125 let pointer_size = self.pointer_size();
126 self.memory.check_align(vtable.into(), self.tcx.data_layout.pointer_align.abi)?;
127 let alloc = self.memory.get(vtable.alloc_id)?;
// Slot 1: size in bytes.
128 let size = alloc.read_ptr_sized(self, vtable.offset(pointer_size, self)?)?
129 .to_bits(pointer_size)? as u64;
// Slot 2: alignment in bytes. `Align::from_bytes` panics on a non-power-of-two,
// so a corrupt vtable aborts here rather than returning an interpreter error.
130 let align = alloc.read_ptr_sized(
132 vtable.offset(pointer_size * 2, self)?,
133 )?.to_bits(pointer_size)? as u64;
134 Ok((Size::from_bytes(size), Align::from_bytes(align).unwrap()))