1 use super::{FnVal, InterpCx, Machine, MemoryKind};
3 use rustc::mir::interpret::{InterpResult, Pointer, PointerArithmetic, Scalar};
4 use rustc::ty::layout::{Align, HasDataLayout, LayoutOf, Size};
5 use rustc::ty::{self, Instance, Ty, TypeFoldable};
7 impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
8 /// Creates a dynamic vtable for the given type and vtable origin. This is used only for
// NOTE(review): this listing is a partial extract — several numbered lines are
// missing (e.g. the `pub fn get_vtable(` signature line, the cache-hit
// `return`, and the `else` arm of the `methods` binding). Comments below
// describe only what the visible code shows.
11 /// The `trait_ref` encodes the erased self type. Hence, if we are
12 /// making an object `Foo<Trait>` from a value of type `Foo<T>`, then
13 /// `trait_ref` would map `T: Trait`.
17 poly_trait_ref: Option<ty::PolyExistentialTraitRef<'tcx>>,
18 ) -> InterpResult<'tcx, Pointer<M::PointerTag>> {
19 trace!("get_vtable(trait_ref={:?})", poly_trait_ref);
// Regions never affect vtable identity, so erase them up front; this also
// makes the cache key below region-independent.
21 let (ty, poly_trait_ref) = self.tcx.erase_regions(&(ty, poly_trait_ref));
23 // All vtables must be monomorphic, bail out otherwise.
24 if ty.needs_subst() || poly_trait_ref.needs_subst() {
25 throw_inval!(TooGeneric);
// Fast path: reuse a previously-built vtable for this (type, trait) pair.
28 if let Some(&vtable) = self.vtables.get(&(ty, poly_trait_ref)) {
29 // This means we guarantee that there are no duplicate vtables, we will
30 // always use the same vtable for the same (Type, Trait) combination.
31 // That's not what happens in rustc, but emulating per-crate deduplication
32 // does not sound like it actually makes anything any better.
// For a real trait object, ask the tcx for the method list; the `None`
// trait-ref case (missing `else` arm in this extract) contributes no methods.
36 let methods = if let Some(poly_trait_ref) = poly_trait_ref {
37 let trait_ref = poly_trait_ref.with_self_ty(*self.tcx, ty);
38 let trait_ref = self.tcx.erase_regions(&trait_ref);
40 self.tcx.vtable_methods(trait_ref)
// Size/align of the *concrete* (sized) self type go into the vtable header.
45 let layout = self.layout_of(ty)?;
46 assert!(!layout.is_unsized(), "can't create a vtable for an unsized type");
47 let size = layout.size.bytes();
48 let align = layout.align.abi.bytes();
50 let ptr_size = self.pointer_size();
51 let ptr_align = self.tcx.data_layout.pointer_align.abi;
52 // /////////////////////////////////////////////////////////////////////////////////////////
53 // If you touch this code, be sure to also make the corresponding changes to
54 // `get_vtable` in `rust_codegen_llvm/meth.rs`.
55 // /////////////////////////////////////////////////////////////////////////////////////////
// Vtable layout (established by the writes below):
//   slot 0: drop-in-place fn ptr, slot 1: size, slot 2: align,
//   slots 3..: one fn ptr per method — hence `3 + methods.len()` slots.
56 let vtable = self.memory.allocate(
57 ptr_size * (3 + methods.len() as u64),
// Slot 0: the drop-in-place function for `ty`, registered as a fn allocation.
63 let drop = Instance::resolve_drop_in_place(*tcx, ty);
64 let drop = self.memory.create_fn_alloc(FnVal::Instance(drop));
66 // No need to do any alignment checks on the memory accesses below, because we know the
67 // allocation is correctly aligned as we created it above. Also we're only offsetting by
68 // multiples of `ptr_align`, which means that it will stay aligned to `ptr_align`.
69 let vtable_alloc = self.memory.get_raw_mut(vtable.alloc_id)?;
70 vtable_alloc.write_ptr_sized(tcx, vtable, drop.into())?;
// Slots 1 and 2: size and align of the concrete type, as pointer-sized uints.
72 let size_ptr = vtable.offset(ptr_size, tcx)?;
73 vtable_alloc.write_ptr_sized(tcx, size_ptr, Scalar::from_uint(size, ptr_size).into())?;
74 let align_ptr = vtable.offset(ptr_size * 2, tcx)?;
75 vtable_alloc.write_ptr_sized(tcx, align_ptr, Scalar::from_uint(align, ptr_size).into())?;
// Slots 3..: one fn pointer per vtable method. `None` entries are skipped,
// leaving their slot unwritten (i.e. uninitialized in this allocation).
77 for (i, method) in methods.iter().enumerate() {
78 if let Some((def_id, substs)) = *method {
79 // resolve for vtable: insert shims where needed
81 ty::Instance::resolve_for_vtable(*tcx, self.param_env, def_id, substs)
82 .ok_or_else(|| err_inval!(TooGeneric))?;
83 let fn_ptr = self.memory.create_fn_alloc(FnVal::Instance(instance));
84 // We cannot use `vtable_alloc` as we are creating fn ptrs in this loop.
85 let method_ptr = vtable.offset(ptr_size * (3 + i as u64), tcx)?;
86 self.memory.get_raw(vtable.alloc_id)?.write_ptr_sized(
// Vtables are immutable after construction; the insert's `is_none()` assert
// guarantees each (ty, trait) pair is built at most once.
94 self.memory.mark_immutable(vtable.alloc_id)?;
95 assert!(self.vtables.insert((ty, poly_trait_ref), vtable).is_none());
100 /// Resolves the function at the specified slot in the provided
101 /// vtable. An index of '0' corresponds to the first method
102 /// declared in the trait of the provided vtable.
103 pub fn get_vtable_slot(
// NOTE(review): the `&self` and `idx` parameter lines are missing from this
// extract; `idx` is the zero-based method index used in the offset below.
105 vtable: Scalar<M::PointerTag>,
107 ) -> InterpResult<'tcx, FnVal<'tcx, M::ExtraFnVal>> {
108 let ptr_size = self.pointer_size();
109 // Skip over the 'drop_ptr', 'size', and 'align' fields.
110 let vtable_slot = vtable.ptr_offset(ptr_size * (idx as u64 + 3), self)?;
// Validate the slot as a dereferenceable, pointer-aligned location. A
// pointer-sized (non-zero) access can never be a ZST, hence the `expect`.
111 let vtable_slot = self
113 .check_ptr_access(vtable_slot, ptr_size, self.tcx.data_layout.pointer_align.abi)?
114 .expect("cannot be a ZST");
// Read the fn pointer out of the slot and resolve it back to a function
// value. (The `let fn_ptr = …` / `not_undef()` lines are missing here.)
117 .get_raw(vtable_slot.alloc_id)?
118 .read_ptr_sized(self, vtable_slot)?
120 Ok(self.memory.get_fn(fn_ptr)?)
123 /// Returns the drop fn instance as well as the actual dynamic type.
124 pub fn read_drop_type_from_vtable(
126 vtable: Scalar<M::PointerTag>,
127 ) -> InterpResult<'tcx, (ty::Instance<'tcx>, Ty<'tcx>)> {
128 // We don't care about the pointee type; we just want a pointer.
// Check that the vtable's first slot (the drop fn) is a readable,
// pointer-aligned location. (The `check_ptr_access` call line itself is
// missing from this extract — only its arguments are visible.)
133 self.tcx.data_layout.pointer_size,
134 self.tcx.data_layout.pointer_align.abi,
136 .expect("cannot be a ZST");
// Read slot 0 and require it to be fully initialized.
138 self.memory.get_raw(vtable.alloc_id)?.read_ptr_sized(self, vtable)?.not_undef()?;
139 // We *need* an instance here, no other kind of function value, to be able
140 // to determine the type.
141 let drop_instance = self.memory.get_fn(drop_fn)?.as_instance()?;
142 trace!("Found drop fn: {:?}", drop_instance);
// NOTE(review): `ty_env` — confirm this helper exists on `Instance` in this
// tree; upstream rustc obtains the signature via `Instance::ty` here.
143 let fn_sig = drop_instance.ty_env(*self.tcx, self.param_env).fn_sig(*self.tcx);
144 let fn_sig = self.tcx.normalize_erasing_late_bound_regions(self.param_env, &fn_sig);
145 // The drop function takes `*mut T` where `T` is the type being dropped, so get that.
146 let args = fn_sig.inputs();
// Defensive UB checks: the stored drop fn must look like `fn(*mut T)`;
// anything else means the vtable (or the fn value in it) is malformed.
148 throw_ub_format!("drop fn should have 1 argument, but signature is {:?}", fn_sig);
153 err_ub_format!("drop fn argument type {} is not a pointer type", args[0])
156 Ok((drop_instance, ty))
/// Reads the `size` and `align` header fields (slots 1 and 2) out of a
/// vtable and returns them as `(Size, Align)`, rejecting sizes larger than
/// the target's maximum object size.
159 pub fn read_size_and_align_from_vtable(
161 vtable: Scalar<M::PointerTag>,
162 ) -> InterpResult<'tcx, (Size, Align)> {
163 let pointer_size = self.pointer_size();
164 // We check for `size = 3 * ptr_size`, which covers the drop fn (unused here),
165 // the size, and the align (which we read below).
168 .check_ptr_access(vtable, 3 * pointer_size, self.tcx.data_layout.pointer_align.abi)?
169 .expect("cannot be a ZST");
170 let alloc = self.memory.get_raw(vtable.alloc_id)?;
// Slot 1 (offset `ptr_size`): the object's size in bytes; must be initialized.
171 let size = alloc.read_ptr_sized(self, vtable.offset(pointer_size, self)?)?.not_undef()?;
172 let size = self.force_bits(size, pointer_size)? as u64;
// Slot 2 (offset `2 * ptr_size`): the object's alignment in bytes.
// (The `let align = …` binding line is missing from this extract.)
174 alloc.read_ptr_sized(self, vtable.offset(pointer_size * 2, self)?)?.not_undef()?;
175 let align = self.force_bits(align, pointer_size)? as u64;
// Guard against sizes no allocation could legally have on this target.
177 if size >= self.tcx.data_layout().obj_size_bound() {
180 size is bigger than largest supported object"
183 Ok((Size::from_bytes(size), Align::from_bytes(align).unwrap()))