use rustc::hir::Mutability as TyMutability;
use rustc::mir::{self, ValidationOp};
use rustc::ty::layout::{Size, Align};
use rustc::ty::{self, Ty};
use rustc::middle::region::CodeExtent;
use rustc_data_structures::indexed_vec::Idx;
use syntax::ast::Mutability;

use error::{EvalError, EvalResult};
use eval_context::EvalContext;
use memory::{MemoryPointer, AccessKind};
use value::{PrimVal, Pointer, Value};

#[derive(Copy, Clone, Debug)]
pub enum Lvalue<'tcx> {
    /// An lvalue referring to a value allocated in the `Memory` system.
    Ptr {
        /// An lvalue may have an invalid (integral or undef) pointer,
        /// since it might be turned back into a reference
        /// before ever being dereferenced.
        ptr: Pointer,
        extra: LvalueExtra,
        /// Remember whether this lvalue is *supposed* to be aligned.
        aligned: bool,
    },

    /// An lvalue referring to a value on the stack. Represented by a stack frame index paired with
    /// a Mir local index.
    Local {
        frame: usize,
        local: mir::Local,
    },

    /// An lvalue referring to a global.
    Global(GlobalId<'tcx>),
}

#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub enum LvalueExtra {
    None,
    Length(u64),
    Vtable(MemoryPointer),
    DowncastVariant(usize),
}
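
// Illustration (not part of the interpreter): which `LvalueExtra` accompanies
// a pointer depends on the pointee type. Hypothetical values, using the
// variants above:
//
//     &[u8; 4]                   => extra: LvalueExtra::None            (thin pointer)
//     &[u8] of length 4          => extra: LvalueExtra::Length(4)       (slice fat pointer)
//     &Trait                     => extra: LvalueExtra::Vtable(vtable)  (trait object)
//     enum lvalue, downcast to
//     its second variant         => extra: LvalueExtra::DowncastVariant(1)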

/// Uniquely identifies a specific constant or static.
#[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)]
pub struct GlobalId<'tcx> {
    /// For a constant or static, the `Instance` of the item itself.
    /// For a promoted global, the `Instance` of the function they belong to.
    pub(super) instance: ty::Instance<'tcx>,

    /// The index for promoted globals within their function's `Mir`.
    pub(super) promoted: Option<mir::Promoted>,
}

#[derive(Clone, Debug)]
pub struct Global<'tcx> {
    pub(super) value: Value,
    /// Only used in `force_allocation` to ensure we don't mark the memory
    /// before the static is initialized. A global that starts out as
    /// `Value::ByVal(PrimVal::Undef)` can get lifted to an allocation
    /// before the static is fully initialized.
    pub(super) initialized: bool,
    pub(super) mutable: Mutability,
    pub(super) ty: Ty<'tcx>,
}

impl<'tcx> Lvalue<'tcx> {
    /// Produces an Lvalue that will error if attempted to be read from.
    pub fn undef() -> Self {
        Self::from_primval_ptr(PrimVal::Undef.into())
    }

    pub(crate) fn from_primval_ptr(ptr: Pointer) -> Self {
        Lvalue::Ptr { ptr, extra: LvalueExtra::None, aligned: true }
    }

    pub(crate) fn from_ptr(ptr: MemoryPointer) -> Self {
        Self::from_primval_ptr(ptr.into())
    }

    pub(super) fn to_ptr_extra_aligned(self) -> (Pointer, LvalueExtra, bool) {
        match self {
            Lvalue::Ptr { ptr, extra, aligned } => (ptr, extra, aligned),
            _ => bug!("to_ptr_extra_aligned: expected Lvalue::Ptr, got {:?}", self),
        }
    }

    pub(super) fn to_ptr(self) -> EvalResult<'tcx, MemoryPointer> {
        let (ptr, extra, _aligned) = self.to_ptr_extra_aligned();
        // At this point, we forget about the alignment information -- the lvalue has been turned
        // into a reference, and no matter where it came from, it now must be aligned.
        assert_eq!(extra, LvalueExtra::None);
        ptr.to_ptr()
    }

    pub(super) fn elem_ty_and_len(self, ty: Ty<'tcx>) -> (Ty<'tcx>, u64) {
        match ty.sty {
            ty::TyArray(elem, n) => (elem, n as u64),

            ty::TySlice(elem) => {
                match self {
                    Lvalue::Ptr { extra: LvalueExtra::Length(len), .. } => (elem, len),
                    _ => bug!("elem_ty_and_len of a TySlice given non-slice lvalue: {:?}", self),
                }
            }

            _ => bug!("elem_ty_and_len expected array or slice, got {:?}", ty),
        }
    }
}
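
// Sketch (illustrative only): `elem_ty_and_len` takes the length from the
// *type* for arrays, but from the *lvalue's extra* for slices:
//
//     ty = [u16; 8], any lvalue                                      => (u16, 8)
//     ty = [u16], Lvalue::Ptr { extra: LvalueExtra::Length(7), .. }  => (u16, 7)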

impl<'tcx> Global<'tcx> {
    pub(super) fn uninitialized(ty: Ty<'tcx>) -> Self {
        Global {
            value: Value::ByVal(PrimVal::Undef),
            mutable: Mutability::Mutable,
            ty,
            initialized: false,
        }
    }

    pub(super) fn initialized(ty: Ty<'tcx>, value: Value, mutable: Mutability) -> Self {
        Global {
            value,
            mutable,
            ty,
            initialized: true,
        }
    }
}
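
// Lifecycle sketch (illustrative; the steps happen in other modules): a static
// starts out uninitialized and mutable so its initializer can write to it, and
// is only later replaced by an initialized value:
//
//     let g = Global::uninitialized(ty);  // ByVal(Undef), initialized: false, mutable
//     // ... the static's initializer runs and stores a value ...
//     let g = Global::initialized(ty, value, Mutability::Immutable);
//     // only now may `force_allocation` mark the backing memory
//     // (see the `initialized` field's doc above)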

impl<'a, 'tcx> EvalContext<'a, 'tcx> {
    /// Reads a value from the lvalue without going through the intermediate step of obtaining
    /// an `Lvalue`.
    pub fn try_read_lvalue(&mut self, lvalue: &mir::Lvalue<'tcx>) -> EvalResult<'tcx, Option<Value>> {
        use rustc::mir::Lvalue::*;
        match *lvalue {
            // Might allow this in the future, but right now there is no way to do this from Rust code anyway.
            Local(mir::RETURN_POINTER) => Err(EvalError::ReadFromReturnPointer),
            // Directly reading a local will always succeed.
            Local(local) => self.frame().get_local(local).map(Some),
            // Directly reading a static will always succeed.
            Static(ref static_) => {
                let instance = ty::Instance::mono(self.tcx, static_.def_id);
                let cid = GlobalId { instance, promoted: None };
                Ok(Some(self.globals.get(&cid).expect("global not cached").value))
            }
            Projection(ref proj) => self.try_read_lvalue_projection(proj),
        }
    }

    fn try_read_lvalue_projection(&mut self, proj: &mir::LvalueProjection<'tcx>) -> EvalResult<'tcx, Option<Value>> {
        use rustc::mir::ProjectionElem::*;
        let base = match self.try_read_lvalue(&proj.base)? {
            Some(base) => base,
            None => return Ok(None),
        };
        let base_ty = self.lvalue_ty(&proj.base);
        match proj.elem {
            Field(field, _) => match (field.index(), base) {
                // the only field of a struct
                (0, Value::ByVal(val)) => Ok(Some(Value::ByVal(val))),
                // split fat pointers, 2 element tuples, ...
                (0...1, Value::ByValPair(a, b)) if self.get_field_count(base_ty)? == 2 => {
                    let val = [a, b][field.index()];
                    Ok(Some(Value::ByVal(val)))
                }
                // the only field of a struct is a fat pointer
                (0, Value::ByValPair(..)) => Ok(Some(base)),
                // anything else cannot be read directly
                _ => Ok(None),
            },
            // The NullablePointer cases should work fine, but we need to take care with normal enums
            Downcast(..) |
            Subslice { .. } |
            // reading index 0 or index 1 from a ByVal or ByValPair could be optimized
            ConstantIndex { .. } | Index(_) |
            // No way to optimize this projection any better than the normal lvalue path
            Deref => Ok(None),
        }
    }
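
// Worked example (hypothetical): let `pair: (i32, i64)` currently be stored as
// `Value::ByValPair(a, b)`. Reading the lvalue `pair.1` hits the
// `(0...1, ByValPair(a, b))` arm above (the field count is 2), picks
// `[a, b][1]`, and returns `Ok(Some(Value::ByVal(b)))` -- no allocation and no
// detour through an `Lvalue::Ptr`.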

    /// Returns the value and, in the `ByRef` case, whether we are supposed to use aligned accesses.
    pub(super) fn eval_and_read_lvalue(&mut self, lvalue: &mir::Lvalue<'tcx>) -> EvalResult<'tcx, Value> {
        // Shortcut for things like accessing a fat pointer's field,
        // which would otherwise (in the `eval_lvalue` path) require moving a `ByValPair` to memory
        // and returning an `Lvalue::Ptr` to it.
        if let Some(val) = self.try_read_lvalue(lvalue)? {
            return Ok(val);
        }
        let lvalue = self.eval_lvalue(lvalue)?;
        self.read_lvalue(lvalue)
    }

    pub fn read_lvalue(&self, lvalue: Lvalue<'tcx>) -> EvalResult<'tcx, Value> {
        match lvalue {
            Lvalue::Ptr { ptr, extra, aligned } => {
                assert_eq!(extra, LvalueExtra::None);
                Ok(Value::ByRef(ptr, aligned))
            }
            Lvalue::Local { frame, local } => {
                self.stack[frame].get_local(local)
            }
            Lvalue::Global(cid) => {
                Ok(self.globals.get(&cid).expect("global not cached").value)
            }
        }
    }

    pub(super) fn eval_lvalue(&mut self, mir_lvalue: &mir::Lvalue<'tcx>) -> EvalResult<'tcx, Lvalue<'tcx>> {
        use rustc::mir::Lvalue::*;
        let lvalue = match *mir_lvalue {
            Local(mir::RETURN_POINTER) => self.frame().return_lvalue,
            Local(local) => Lvalue::Local { frame: self.cur_frame(), local },

            Static(ref static_) => {
                let instance = ty::Instance::mono(self.tcx, static_.def_id);
                Lvalue::Global(GlobalId { instance, promoted: None })
            }

            Projection(ref proj) => {
                let ty = self.lvalue_ty(&proj.base);
                let lvalue = self.eval_lvalue(&proj.base)?;
                return self.eval_lvalue_projection(lvalue, ty, &proj.elem);
            }
        };

        if log_enabled!(::log::LogLevel::Trace) {
            self.dump_local(lvalue);
        }

        Ok(lvalue)
    }
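
// Illustration (hypothetical MIR): evaluating the lvalue `(*x).f` peels off
// one projection per recursion step:
//
//     Projection(Field(f)) over Projection(Deref) over Local(x)
//       eval_lvalue(Local(x))  => Lvalue::Local { frame, local: x }
//       Deref                  => read_lvalue + val_to_lvalue => Lvalue::Ptr { .. }
//       Field(f)               => lvalue_field => Lvalue::Ptr { ptr + offset_of(f), .. }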

    pub fn lvalue_field(
        &mut self,
        base: Lvalue<'tcx>,
        field_index: usize,
        base_ty: Ty<'tcx>,
        field_ty: Ty<'tcx>,
    ) -> EvalResult<'tcx, Lvalue<'tcx>> {
        let base_layout = self.type_layout(base_ty)?;
        use rustc::ty::layout::Layout::*;
        let (offset, packed) = match *base_layout {
            Univariant { ref variant, .. } => {
                (variant.offsets[field_index], variant.packed)
            }

            General { ref variants, .. } => {
                let (_, base_extra, _) = base.to_ptr_extra_aligned();
                if let LvalueExtra::DowncastVariant(variant_idx) = base_extra {
                    // +1 for the discriminant, which is field 0
                    (variants[variant_idx].offsets[field_index + 1], variants[variant_idx].packed)
                } else {
                    bug!("field access on enum had no variant index");
                }
            }

            RawNullablePointer { .. } => {
                assert_eq!(field_index, 0);
                // the pointer is the only field, at offset 0
                return Ok(base);
            }

            StructWrappedNullablePointer { ref nonnull, .. } => {
                (nonnull.offsets[field_index], nonnull.packed)
            }

            UntaggedUnion { .. } => return Ok(base),

            Vector { element, count } => {
                let field = field_index as u64;
                assert!(field < count);
                let elem_size = element.size(&self.tcx.data_layout).bytes();
                (Size::from_bytes(field * elem_size), false)
            }

            // We treat arrays + fixed sized indexing like field accesses
            Array { .. } => {
                let field = field_index as u64;
                let elem_size = match base_ty.sty {
                    ty::TyArray(elem_ty, n) => {
                        assert!(field < n as u64);
                        self.type_size(elem_ty)?.expect("array elements are sized") as u64
                    }
                    _ => bug!("lvalue_field: got Array layout but non-array type {:?}", base_ty),
                };
                (Size::from_bytes(field * elem_size), false)
            }

            FatPointer { .. } => {
                let bytes = field_index as u64 * self.memory.pointer_size();
                let offset = Size::from_bytes(bytes);
                (offset, false)
            }

            _ => bug!("field access on non-product type: {:?}", base_layout),
        };

        // Do not allocate in trivial cases
        let (base_ptr, base_extra, aligned) = match base {
            Lvalue::Ptr { ptr, extra, aligned } => (ptr, extra, aligned),
            Lvalue::Local { frame, local } => match self.stack[frame].get_local(local)? {
                // in case the type has a single field, just return the value
                Value::ByVal(_) if self.get_field_count(base_ty).map(|c| c == 1).unwrap_or(false) => {
                    assert_eq!(offset.bytes(), 0, "ByVal can only have one non-ZST field, with offset 0");
                    return Ok(base);
                }
                Value::ByRef(..) |
                Value::ByValPair(..) |
                Value::ByVal(_) => self.force_allocation(base)?.to_ptr_extra_aligned(),
            },
            Lvalue::Global(cid) => match self.globals.get(&cid).expect("uncached global").value {
                // in case the type has a single field, just return the value
                Value::ByVal(_) if self.get_field_count(base_ty).map(|c| c == 1).unwrap_or(false) => {
                    assert_eq!(offset.bytes(), 0, "ByVal can only have one non-ZST field, with offset 0");
                    return Ok(base);
                }
                Value::ByRef(..) |
                Value::ByValPair(..) |
                Value::ByVal(_) => self.force_allocation(base)?.to_ptr_extra_aligned(),
            },
        };

        let offset = match base_extra {
            LvalueExtra::Vtable(tab) => {
                let (_, align) = self.size_and_align_of_dst(base_ty, base_ptr.to_value_with_vtable(tab))?;
                offset.abi_align(Align::from_bytes(align, align).unwrap()).bytes()
            }
            _ => offset.bytes(),
        };

        let ptr = base_ptr.offset(offset, &self)?;

        let field_ty = self.monomorphize(field_ty, self.substs());

        let extra = if self.type_is_sized(field_ty) {
            LvalueExtra::None
        } else {
            match base_extra {
                LvalueExtra::None => bug!("expected fat pointer"),
                LvalueExtra::DowncastVariant(..) =>
                    bug!("Rust doesn't support unsized fields in enum variants"),
                LvalueExtra::Vtable(_) |
                LvalueExtra::Length(_) => {},
            }
            base_extra
        };

        Ok(Lvalue::Ptr { ptr, extra, aligned: aligned && !packed })
    }
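
// Worked example (assuming the usual layout of this hypothetical type):
// accessing field 1 of `#[repr(packed)] struct S { a: u8, b: u32 }`. The
// `Univariant` arm yields offset 1 and `packed == true`, so the result is
// `Lvalue::Ptr { ptr: base + 1, extra: LvalueExtra::None, aligned: false }`
// and all later accesses through this lvalue must be unaligned.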

    fn val_to_lvalue(&mut self, val: Value, ty: Ty<'tcx>) -> EvalResult<'tcx, Lvalue<'tcx>> {
        Ok(match self.tcx.struct_tail(ty).sty {
            ty::TyDynamic(..) => {
                let (ptr, vtable) = val.into_ptr_vtable_pair(&mut self.memory)?;
                Lvalue::Ptr { ptr, extra: LvalueExtra::Vtable(vtable), aligned: true }
            }
            ty::TyStr | ty::TySlice(_) => {
                let (ptr, len) = val.into_slice(&mut self.memory)?;
                Lvalue::Ptr { ptr, extra: LvalueExtra::Length(len), aligned: true }
            }
            _ => Lvalue::Ptr { ptr: val.into_ptr(&mut self.memory)?, extra: LvalueExtra::None, aligned: true },
        })
    }

    fn lvalue_index(&mut self, base: Lvalue<'tcx>, outer_ty: Ty<'tcx>, n: u64) -> EvalResult<'tcx, Lvalue<'tcx>> {
        // Taking the outer type here may seem odd; it is needed because, for array types, the outer type gives away the length.
        let base = self.force_allocation(base)?;
        let (base_ptr, _, aligned) = base.to_ptr_extra_aligned();

        let (elem_ty, len) = base.elem_ty_and_len(outer_ty);
        let elem_size = self.type_size(elem_ty)?.expect("slice element must be sized");
        assert!(n < len, "Tried to access element {} of array/slice with length {}", n, len);
        let ptr = base_ptr.offset(n * elem_size, self.memory.layout)?;
        Ok(Lvalue::Ptr { ptr, extra: LvalueExtra::None, aligned })
    }
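
// Worked example (hypothetical): indexing element 2 of a `[u16; 8]` starting
// at `base_ptr`:
//
//     elem_ty = u16, len = 8, elem_size = 2
//     assert!(2 < 8);
//     ptr = base_ptr + 2 * 2  // byte offset 4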

    fn eval_lvalue_projection(
        &mut self,
        base: Lvalue<'tcx>,
        base_ty: Ty<'tcx>,
        proj_elem: &mir::ProjectionElem<'tcx, mir::Operand<'tcx>>,
    ) -> EvalResult<'tcx, Lvalue<'tcx>> {
        use rustc::mir::ProjectionElem::*;
        let (ptr, extra, aligned) = match *proj_elem {
            Field(field, field_ty) => {
                return self.lvalue_field(base, field.index(), base_ty, field_ty);
            }

            Downcast(_, variant) => {
                let base_layout = self.type_layout(base_ty)?;

                let base = self.force_allocation(base)?;
                let (base_ptr, base_extra, aligned) = base.to_ptr_extra_aligned();

                use rustc::ty::layout::Layout::*;
                let extra = match *base_layout {
                    General { .. } => LvalueExtra::DowncastVariant(variant),
                    RawNullablePointer { .. } | StructWrappedNullablePointer { .. } => base_extra,
                    _ => bug!("variant downcast on non-aggregate: {:?}", base_layout),
                };
                (base_ptr, extra, aligned)
            }

            Deref => {
                let val = self.read_lvalue(base)?;

                let pointee_type = match base_ty.sty {
                    ty::TyRawPtr(ref tam) |
                    ty::TyRef(_, ref tam) => tam.ty,
                    ty::TyAdt(def, _) if def.is_box() => base_ty.boxed_ty(),
                    _ => bug!("can only deref pointer types"),
                };

                trace!("deref to {} on {:?}", pointee_type, val);

                return self.val_to_lvalue(val, pointee_type);
            }

            Index(ref operand) => {
                let n_ptr = self.eval_operand(operand)?;
                let usize = self.tcx.types.usize;
                let n = self.value_to_primval(n_ptr, usize)?.to_u64()?;
                return self.lvalue_index(base, base_ty, n);
            }

            ConstantIndex { offset, min_length, from_end } => {
                let base = self.force_allocation(base)?;
                let (base_ptr, _, aligned) = base.to_ptr_extra_aligned();

                let (elem_ty, n) = base.elem_ty_and_len(base_ty);
                let elem_size = self.type_size(elem_ty)?.expect("sequence element must be sized");
                assert!(n >= min_length as u64);

                let index = if from_end {
                    n - u64::from(offset)
                } else {
                    u64::from(offset)
                };

                let ptr = base_ptr.offset(index * elem_size, &self)?;
                (ptr, LvalueExtra::None, aligned)
            }

            Subslice { from, to } => {
                let base = self.force_allocation(base)?;
                let (base_ptr, _, aligned) = base.to_ptr_extra_aligned();

                let (elem_ty, n) = base.elem_ty_and_len(base_ty);
                let elem_size = self.type_size(elem_ty)?.expect("slice element must be sized");
                assert!(u64::from(from) <= n - u64::from(to));
                let ptr = base_ptr.offset(u64::from(from) * elem_size, &self)?;
                let extra = LvalueExtra::Length(n - u64::from(to) - u64::from(from));
                (ptr, extra, aligned)
            }
        };

        Ok(Lvalue::Ptr { ptr, extra, aligned })
    }
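
// Worked example for `Subslice` (hypothetical): a subslice pattern that drops
// one element at each end of a slice of length n = 5 gives `from = 1, to = 1`:
//
//     ptr   = base_ptr + 1 * elem_size        // skip the `from` leading elements
//     extra = LvalueExtra::Length(5 - 1 - 1)  // = 3 remaining elements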

    pub(super) fn lvalue_ty(&self, lvalue: &mir::Lvalue<'tcx>) -> Ty<'tcx> {
        self.monomorphize(lvalue.ty(self.mir(), self.tcx).to_ty(self.tcx), self.substs())
    }
}

#[derive(Copy, Clone, Debug)]
pub struct ValidationCtx {
    op: ValidationOp,
    region: Option<CodeExtent>,
    mutbl: TyMutability,
}

impl ValidationCtx {
    pub fn new(op: ValidationOp) -> Self {
        ValidationCtx {
            op, region: None, mutbl: TyMutability::MutMutable,
        }
    }
}

impl<'a, 'tcx> EvalContext<'a, 'tcx> {
    fn validate_variant(
        &mut self,
        lvalue: Lvalue<'tcx>,
        ty: Ty<'tcx>,
        variant: &ty::VariantDef,
        subst: &ty::subst::Substs<'tcx>,
        vctx: ValidationCtx,
    ) -> EvalResult<'tcx> {
        // TODO: Take visibility/privacy into account.
        for (idx, field) in variant.fields.iter().enumerate() {
            let field_ty = field.ty(self.tcx, subst);
            let field_lvalue = self.lvalue_field(lvalue, idx, ty, field_ty)?;
            self.validate(field_lvalue, field_ty, vctx)?;
        }
        Ok(())
    }

    fn validate_ptr(&mut self, val: Value, pointee_ty: Ty<'tcx>, vctx: ValidationCtx) -> EvalResult<'tcx> {
        // Check alignment and non-NULL-ness
        let (_, align) = self.size_and_align_of_dst(pointee_ty, val)?;
        let ptr = val.into_ptr(&mut self.memory)?;
        self.memory.check_align(ptr, align)?;

        let pointee_lvalue = self.val_to_lvalue(val, pointee_ty)?;
        self.validate(pointee_lvalue, pointee_ty, vctx)
    }
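
// Sketch of the recursion (illustrative): validating a value of type `&&i32`
// alternates between `validate` and `validate_ptr`:
//
//     validate(lv, &&i32)   -> validate_ptr(val, &i32)   // align + NULL check
//       validate(lv', &i32) -> validate_ptr(val', i32)   // align + NULL check
//         validate(lv'', i32)                            // leaf: owning primitive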

    /// Validate the lvalue at the given type. If `vctx.op` is `ValidationOp::Release`, this just releases all write locks.
    pub(super) fn validate(&mut self, lvalue: Lvalue<'tcx>, ty: Ty<'tcx>, mut vctx: ValidationCtx) -> EvalResult<'tcx>
    {
        use rustc::ty::TypeVariants::*;
        use rustc::ty::RegionKind::*;
        use rustc::ty::AdtKind;
        use self::TyMutability::*;

        trace!("Validating {:?} at type {}, context {:?}", lvalue, ty, vctx);

        // Decide whether this type *owns* the memory it covers (like integers), or whether it
        // just assembles pieces (that each own their memory) into a larger whole.
        // TODO: Currently, we don't acquire locks for padding and discriminants. We should.
        let is_owning = match ty.sty {
            TyInt(_) | TyUint(_) | TyRawPtr(_) |
            TyBool | TyFloat(_) | TyChar | TyStr |
            TyRef(..) | TyFnPtr(..) | TyNever => true,
            TyAdt(adt, _) if adt.is_box() => true,
            TySlice(_) | TyAdt(_, _) | TyTuple(..) | TyClosure(..) | TyArray(..) => false,
            TyParam(_) | TyInfer(_) => bug!("I got an incomplete type for validation"),
            _ => return Err(EvalError::Unimplemented(format!("Unimplemented type encountered when checking validity."))),
        };
        if is_owning {
            match lvalue {
                Lvalue::Ptr { ptr, extra, aligned: _ } => {
                    // Determine the size
                    // FIXME: Can we reuse size_and_align_of_dst for Lvalues?
                    let len = match self.type_size(ty)? {
                        Some(size) => {
                            assert_eq!(extra, LvalueExtra::None, "Got a fat ptr to a sized type");
                            size
                        }
                        None => {
                            // The only unsized type we consider "owning" is TyStr.
                            assert_eq!(ty.sty, TyStr, "Found a surprising unsized owning type");
                            // The extra must be the length, in bytes.
                            match extra {
                                LvalueExtra::Length(len) => len,
                                _ => bug!("TyStr must have a length as extra"),
                            }
                        }
                    };

                    let ptr = ptr.to_ptr()?;
                    let access = match vctx.mutbl { MutMutable => AccessKind::Write, MutImmutable => AccessKind::Read };
                    match vctx.op {
                        ValidationOp::Acquire => self.memory.acquire_lock(ptr, len, vctx.region, access)?,
                        ValidationOp::Release => self.memory.release_write_lock_until(ptr, len, None)?,
                        ValidationOp::Suspend(region) => self.memory.release_write_lock_until(ptr, len, Some(region))?,
                    }
                }
                Lvalue::Local { .. } | Lvalue::Global(..) => {
                    // These are not backed by memory, so we have nothing to do.
                }
            }
        }

        match ty.sty {
            TyInt(_) | TyUint(_) | TyRawPtr(_) => {
                // TODO: Make sure these are not undef.
                // We could do a bounds-check and other sanity checks on the lvalue, but it would be a bug in miri for this to ever fail.
                Ok(())
            }
            TyBool | TyFloat(_) | TyChar | TyStr => {
                // TODO: Check if these are valid bool/float/codepoint/UTF-8, respectively (and in particular, not undef).
                Ok(())
            }
            TyNever => {
                Err(EvalError::ValidationFailure(format!("The empty type is never valid.")))
            }
            TyRef(region, ty::TypeAndMut { ty: pointee_ty, mutbl }) => {
                let val = self.read_lvalue(lvalue)?;
                // Sharing restricts our context
                if mutbl == MutImmutable {
                    // Actually, in case of releasing-validation, this means we are done.
                    if vctx.op != ValidationOp::Acquire {
                        return Ok(());
                    }
                    vctx.mutbl = MutImmutable;
                }
                // Inner lifetimes *outlive* outer ones, so we only record the region of this
                // borrow in the context if we have no lifetime restriction yet.
                if vctx.region == None {
                    match *region {
                        ReScope(ce) => vctx.region = Some(ce),
                        // It is possible to encounter erased lifetimes here because the lifetimes in
                        // this function's Substs will be erased.
                        _ => {}
                    }
                }

                self.validate_ptr(val, pointee_ty, vctx)
            }
            TyAdt(adt, _) if adt.is_box() => {
                let val = self.read_lvalue(lvalue)?;
                self.validate_ptr(val, ty.boxed_ty(), vctx)
            }
            TyFnPtr(_sig) => {
                // TODO: The function names here could need some improvement.
                let ptr = self.read_lvalue(lvalue)?.into_ptr(&mut self.memory)?.to_ptr()?;
                self.memory.get_fn(ptr)?;
                // TODO: Check if the signature matches (should be the same check as what terminator/mod.rs already does on call?).
                Ok(())
            }

            // Compound types
            TySlice(elem_ty) => {
                let len = match lvalue {
                    Lvalue::Ptr { extra: LvalueExtra::Length(len), .. } => len,
                    _ => bug!("acquire_valid of a TySlice given non-slice lvalue: {:?}", lvalue),
                };
                for i in 0..len {
                    let inner_lvalue = self.lvalue_index(lvalue, ty, i)?;
                    self.validate(inner_lvalue, elem_ty, vctx)?;
                }
                Ok(())
            }
            TyArray(elem_ty, len) => {
                for i in 0..len {
                    let inner_lvalue = self.lvalue_index(lvalue, ty, i as u64)?;
                    self.validate(inner_lvalue, elem_ty, vctx)?;
                }
                Ok(())
            }
            TyAdt(adt, subst) => {
                match adt.adt_kind() {
                    AdtKind::Enum => {
                        // TODO: Can we get the discriminant without forcing an allocation?
                        let ptr = self.force_allocation(lvalue)?.to_ptr()?;
                        let discr = self.read_discriminant_value(ptr, ty)?;

                        // Get variant index for discriminant
                        let variant_idx = adt.discriminants(self.tcx)
                            .position(|variant_discr| variant_discr.to_u128_unchecked() == discr)
                            .ok_or(EvalError::InvalidDiscriminant)?;
                        let variant = &adt.variants[variant_idx];

                        if variant.fields.len() > 0 {
                            // Downcast to this variant
                            let lvalue = self.eval_lvalue_projection(lvalue, ty, &mir::ProjectionElem::Downcast(adt, variant_idx))?;

                            // Recursively validate the fields
                            self.validate_variant(lvalue, ty, variant, subst, vctx)
                        } else {
                            // No fields, nothing left to check. Downcasting may fail, e.g. in case of a CEnum.
                            Ok(())
                        }
                    }
                    AdtKind::Struct => {
                        self.validate_variant(lvalue, ty, adt.struct_variant(), subst, vctx)
                    }
                    AdtKind::Union => {
                        // No guarantees are provided for union types.
                        // TODO: Make sure that all access to union fields is unsafe; otherwise, we may have some checking to do (but what exactly?)
                        Ok(())
                    }
                }
            }
            TyTuple(ref types, _) => {
                for (idx, field_ty) in types.iter().enumerate() {
                    let field_lvalue = self.lvalue_field(lvalue, idx, ty, field_ty)?;
                    self.validate(field_lvalue, field_ty, vctx)?;
                }
                Ok(())
            }
            TyClosure(def_id, ref closure_substs) => {
                for (idx, field_ty) in closure_substs.upvar_tys(def_id, self.tcx).enumerate() {
                    let field_lvalue = self.lvalue_field(lvalue, idx, ty, field_ty)?;
                    self.validate(field_lvalue, field_ty, vctx)?;
                }
                // TODO: Check if the signature matches (should be the same check as what terminator/mod.rs already does on call?).
                // Are there other things we can/should check? Like vtable pointers?
                Ok(())
            }
            _ => bug!("We already established that this is a type we support."),
        }
    }
}
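
// End-to-end sketch (illustrative): acquiring validation for an lvalue of type
// `&mut i32` with the initial context `ValidationCtx::new(ValidationOp::Acquire)`:
//
//   1. `&mut i32` is an owning type: a write lock is acquired on the bytes of
//      the reference itself (`vctx.mutbl` is still `MutMutable`).
//   2. The `TyRef` arm keeps the context write-capable (the borrow is mutable)
//      and records the borrow's region if it is a `ReScope`.
//   3. `validate_ptr` checks alignment and non-NULL-ness of the pointer value.
//   4. Recursing on the pointee, `i32` is owning as well, so a write lock is
//      acquired on the 4 bytes it covers.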