1 //! Functions concerning immediate values and operands, and reading from operands.
2 //! All high-level functions to read from memory work on operands as sources.
4 use std::convert::{TryFrom, TryInto};
6 use rustc::ty::layout::{
7 self, HasDataLayout, IntegerExt, LayoutOf, PrimitiveExt, Size, TyLayout, VariantIdx,
11 use super::{InterpCx, MPlaceTy, Machine, MemPlace, Place, PlaceTy};
12 pub use rustc::mir::interpret::ScalarMaybeUndef;
13 use rustc::mir::interpret::{
14 sign_extend, truncate, AllocId, ConstValue, GlobalId, InterpResult, Pointer, Scalar,
16 use rustc_macros::HashStable;
19 /// An `Immediate` represents a single immediate self-contained Rust value.
21 /// For optimization of a few very common cases, there is also a representation for a pair of
22 /// primitive values (`ScalarPair`). It allows Miri to avoid making allocations for checked binary
23 /// operations and wide pointers. This idea was taken from rustc's codegen.
24 /// In particular, thanks to `ScalarPair`, arithmetic operations and casts can be entirely
25 /// defined on `Immediate`, and do not have to work with a `Place`.
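/// For example (an illustrative sketch, not part of the original docs; `data_ptr`, `len`,
/// `cx`, `result` and `overflowed` are assumed bindings), a slice reference and a checked
/// `u8` addition result both fit in a `ScalarPair`:
///
/// ```ignore
/// // Wide pointer for `&[u8]`: (data pointer, length).
/// let slice: Immediate<Tag> = Immediate::new_slice(data_ptr, len, cx);
/// // Checked addition: (wrapped result, overflow flag).
/// let checked: Immediate<Tag> = Immediate::ScalarPair(
///     Scalar::from_uint(result, Size::from_bytes(1)).into(),
///     Scalar::from_bool(overflowed).into(),
/// );
/// ```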
26 #[derive(Copy, Clone, Debug, PartialEq, Eq, HashStable, Hash)]
27 pub enum Immediate<Tag = (), Id = AllocId> {
28 Scalar(ScalarMaybeUndef<Tag, Id>),
29 ScalarPair(ScalarMaybeUndef<Tag, Id>, ScalarMaybeUndef<Tag, Id>),
32 impl<Tag> From<ScalarMaybeUndef<Tag>> for Immediate<Tag> {
34 fn from(val: ScalarMaybeUndef<Tag>) -> Self {
35 Immediate::Scalar(val)
39 impl<Tag> From<Scalar<Tag>> for Immediate<Tag> {
41 fn from(val: Scalar<Tag>) -> Self {
42 Immediate::Scalar(val.into())
46 impl<Tag> From<Pointer<Tag>> for Immediate<Tag> {
48 fn from(val: Pointer<Tag>) -> Self {
49 Immediate::Scalar(Scalar::from(val).into())
53 impl<'tcx, Tag> Immediate<Tag> {
54 pub fn new_slice(val: Scalar<Tag>, len: u64, cx: &impl HasDataLayout) -> Self {
55 Immediate::ScalarPair(
57 Scalar::from_uint(len, cx.data_layout().pointer_size).into(),
61 pub fn new_dyn_trait(val: Scalar<Tag>, vtable: Pointer<Tag>) -> Self {
62 Immediate::ScalarPair(val.into(), vtable.into())
66 pub fn to_scalar_or_undef(self) -> ScalarMaybeUndef<Tag> {
68 Immediate::Scalar(val) => val,
69 Immediate::ScalarPair(..) => bug!("Got a wide pointer where a scalar was expected"),
74 pub fn to_scalar(self) -> InterpResult<'tcx, Scalar<Tag>> {
75 self.to_scalar_or_undef().not_undef()
79 pub fn to_scalar_pair(self) -> InterpResult<'tcx, (Scalar<Tag>, Scalar<Tag>)> {
81 Immediate::Scalar(..) => bug!("Got a thin pointer where a scalar pair was expected"),
82 Immediate::ScalarPair(a, b) => Ok((a.not_undef()?, b.not_undef()?)),
87 // ScalarPair needs a type to interpret, so we often have an immediate and a type together
88 // as input for binary and cast operations.
89 #[derive(Copy, Clone, Debug)]
90 pub struct ImmTy<'tcx, Tag = ()> {
91 pub(crate) imm: Immediate<Tag>,
92 pub layout: TyLayout<'tcx>,
95 // `Tag: Copy` because some methods on `Scalar` consume the scalar (and with it the tag) by value
96 impl<Tag: Copy> std::fmt::Display for ImmTy<'tcx, Tag> {
97 fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
99 Immediate::Scalar(ScalarMaybeUndef::Scalar(s)) => match s.to_bits(self.layout.size) {
101 match self.layout.ty.kind {
106 super::sign_extend(s, self.layout.size) as i128,
109 ty::Uint(_) => return write!(fmt, "{}", s),
110 ty::Bool if s == 0 => return fmt.write_str("false"),
111 ty::Bool if s == 1 => return fmt.write_str("true"),
113 if let Some(c) = u32::try_from(s).ok().and_then(std::char::from_u32) {
114 return write!(fmt, "{}", c);
117 ty::Float(ast::FloatTy::F32) => {
118 if let Ok(u) = u32::try_from(s) {
119 return write!(fmt, "{}", f32::from_bits(u));
122 ty::Float(ast::FloatTy::F64) => {
123 if let Ok(u) = u64::try_from(s) {
124 return write!(fmt, "{}", f64::from_bits(u));
129 write!(fmt, "{:x}", s)
131 Err(_) => fmt.write_str("{pointer}"),
133 Immediate::Scalar(ScalarMaybeUndef::Undef) => fmt.write_str("{undef}"),
134 Immediate::ScalarPair(..) => fmt.write_str("{wide pointer or tuple}"),
139 impl<'tcx, Tag> ::std::ops::Deref for ImmTy<'tcx, Tag> {
140 type Target = Immediate<Tag>;
142 fn deref(&self) -> &Immediate<Tag> {
147 /// An `Operand` is the result of computing a `mir::Operand`. It can be immediate,
148 /// or still in memory. The latter is an optimization, to delay reading that chunk of
149 /// memory and to avoid having to store arbitrary-sized data here.
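/// For example (an illustrative sketch, not part of the original docs; `ptr` and `layout`
/// are assumed bindings), both shapes can hold the value `0u8`:
///
/// ```ignore
/// // Held inline, no memory access needed to read it:
/// let imm: Operand<Tag> = Operand::Immediate(Scalar::from_uint(0u8, Size::from_bytes(1)).into());
/// // Still in memory, to be read lazily if and when it is needed:
/// let ind: Operand<Tag> = Operand::Indirect(MemPlace::from_ptr(ptr, layout.align.abi));
/// ```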
150 #[derive(Copy, Clone, Debug, PartialEq, Eq, HashStable, Hash)]
151 pub enum Operand<Tag = (), Id = AllocId> {
152 Immediate(Immediate<Tag, Id>),
153 Indirect(MemPlace<Tag, Id>),
156 #[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
157 pub struct OpTy<'tcx, Tag = ()> {
158 op: Operand<Tag>, // Keep this private; it helps enforce invariants.
159 pub layout: TyLayout<'tcx>,
162 impl<'tcx, Tag> ::std::ops::Deref for OpTy<'tcx, Tag> {
163 type Target = Operand<Tag>;
165 fn deref(&self) -> &Operand<Tag> {
170 impl<'tcx, Tag: Copy> From<MPlaceTy<'tcx, Tag>> for OpTy<'tcx, Tag> {
172 fn from(mplace: MPlaceTy<'tcx, Tag>) -> Self {
173 OpTy { op: Operand::Indirect(*mplace), layout: mplace.layout }
177 impl<'tcx, Tag> From<ImmTy<'tcx, Tag>> for OpTy<'tcx, Tag> {
179 fn from(val: ImmTy<'tcx, Tag>) -> Self {
180 OpTy { op: Operand::Immediate(val.imm), layout: val.layout }
184 impl<'tcx, Tag: Copy> ImmTy<'tcx, Tag> {
186 pub fn from_scalar(val: Scalar<Tag>, layout: TyLayout<'tcx>) -> Self {
187 ImmTy { imm: val.into(), layout }
191 pub fn try_from_uint(i: impl Into<u128>, layout: TyLayout<'tcx>) -> Option<Self> {
192 Some(Self::from_scalar(Scalar::try_from_uint(i, layout.size)?, layout))
195 pub fn from_uint(i: impl Into<u128>, layout: TyLayout<'tcx>) -> Self {
196 Self::from_scalar(Scalar::from_uint(i, layout.size), layout)
200 pub fn try_from_int(i: impl Into<i128>, layout: TyLayout<'tcx>) -> Option<Self> {
201 Some(Self::from_scalar(Scalar::try_from_int(i, layout.size)?, layout))
205 pub fn from_int(i: impl Into<i128>, layout: TyLayout<'tcx>) -> Self {
206 Self::from_scalar(Scalar::from_int(i, layout.size), layout)
210 pub fn to_bits(self) -> InterpResult<'tcx, u128> {
211 self.to_scalar()?.to_bits(self.layout.size)
215 // Use the existing layout if given (but sanity check in debug mode),
216 // or compute the layout.
218 pub(super) fn from_known_layout<'tcx>(
219 layout: Option<TyLayout<'tcx>>,
220 compute: impl FnOnce() -> InterpResult<'tcx, TyLayout<'tcx>>,
221 ) -> InterpResult<'tcx, TyLayout<'tcx>> {
225 if cfg!(debug_assertions) {
226 let layout2 = compute()?;
228 layout.details, layout2.details,
229 "mismatch in layout of supposedly equal-layout types {:?} and {:?}",
230 layout.ty, layout2.ty
238 impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
239 /// Normalize `place.ptr` to a `Pointer` if this is a memory place and not a ZST.
240 /// Can be helpful to avoid lots of `force_ptr` calls later, if this place is used a lot.
244 op: OpTy<'tcx, M::PointerTag>,
245 ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
246 match op.try_as_mplace(self) {
247 Ok(mplace) => Ok(self.force_mplace_ptr(mplace)?.into()),
248 Err(imm) => Ok(imm.into()), // Nothing to cast/force
252 /// Try reading an immediate from memory; this is particularly interesting for `ScalarPair`.
253 /// Returns `None` if the layout does not permit loading this as a value.
254 fn try_read_immediate_from_mplace(
256 mplace: MPlaceTy<'tcx, M::PointerTag>,
257 ) -> InterpResult<'tcx, Option<ImmTy<'tcx, M::PointerTag>>> {
258 if mplace.layout.is_unsized() {
259 // Don't touch unsized
264 .check_mplace_access(mplace, None)
265 .expect("places should be checked on creation")
269 return Ok(Some(ImmTy {
271 imm: Scalar::zst().into(),
272 layout: mplace.layout,
277 match mplace.layout.abi {
278 layout::Abi::Scalar(..) => {
279 let scalar = self.memory.get_raw(ptr.alloc_id)?.read_scalar(
284 Ok(Some(ImmTy { imm: scalar.into(), layout: mplace.layout }))
286 layout::Abi::ScalarPair(ref a, ref b) => {
287 // We checked `ptr_align` above, so all fields will have the alignment they need.
288 // We would anyway check against `ptr_align.restrict_for_offset(b_offset)`,
289 // which `ptr.offset(b_offset)` cannot possibly fail to satisfy.
290 let (a, b) = (&a.value, &b.value);
291 let (a_size, b_size) = (a.size(self), b.size(self));
293 let b_offset = a_size.align_to(b.align(self).abi);
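// Illustrative example (not from the original source): for a `&[u8]` wide pointer on a
// 64-bit target, `a_size` is 8 bytes and `b` (the length) is 8-byte aligned, so
// `b_offset` is 8 bytes.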
294 assert!(b_offset.bytes() > 0); // we later use the offset to tell apart the fields
295 let b_ptr = ptr.offset(b_offset, self)?;
296 let a_val = self.memory.get_raw(ptr.alloc_id)?.read_scalar(self, a_ptr, a_size)?;
297 let b_val = self.memory.get_raw(ptr.alloc_id)?.read_scalar(self, b_ptr, b_size)?;
298 Ok(Some(ImmTy { imm: Immediate::ScalarPair(a_val, b_val), layout: mplace.layout }))
304 /// Try returning an immediate for the operand.
305 /// If the layout does not permit loading this as an immediate, return where in memory
306 /// we can find the data.
307 /// Note that for a given layout, this operation will either always fail or always
308 /// succeed! Whether it succeeds depends on whether the layout can be represented
309 in an `Immediate`, not on which data is currently stored there.
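/// For example, a `u32` (scalar ABI) or a `&[u8]` wide pointer (scalar-pair ABI) can always
/// be read as an immediate, while a `[u8; 16]` array (aggregate ABI) always takes the
/// `MPlaceTy` path (illustrative note, not part of the original docs).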
310 pub(crate) fn try_read_immediate(
312 src: OpTy<'tcx, M::PointerTag>,
313 ) -> InterpResult<'tcx, Result<ImmTy<'tcx, M::PointerTag>, MPlaceTy<'tcx, M::PointerTag>>> {
314 Ok(match src.try_as_mplace(self) {
316 if let Some(val) = self.try_read_immediate_from_mplace(mplace)? {
326 /// Read an immediate from a place, asserting that that is possible with the given layout.
328 pub fn read_immediate(
330 op: OpTy<'tcx, M::PointerTag>,
331 ) -> InterpResult<'tcx, ImmTy<'tcx, M::PointerTag>> {
332 if let Ok(imm) = self.try_read_immediate(op)? {
335 bug!("primitive read failed for type: {:?}", op.layout.ty);
339 /// Read a scalar from a place
342 op: OpTy<'tcx, M::PointerTag>,
343 ) -> InterpResult<'tcx, ScalarMaybeUndef<M::PointerTag>> {
344 Ok(self.read_immediate(op)?.to_scalar_or_undef())
347 // Turn the wide MPlace into a string (must already be dereferenced!)
348 pub fn read_str(&self, mplace: MPlaceTy<'tcx, M::PointerTag>) -> InterpResult<'tcx, &str> {
349 let len = mplace.len(self)?;
350 let bytes = self.memory.read_bytes(mplace.ptr, Size::from_bytes(len as u64))?;
351 let str = ::std::str::from_utf8(bytes)
352 .map_err(|err| err_unsup!(ValidationFailure(err.to_string())))?;
356 /// Projection functions
357 pub fn operand_field(
359 op: OpTy<'tcx, M::PointerTag>,
361 ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
362 let base = match op.try_as_mplace(self) {
365 let field = self.mplace_field(mplace, field)?;
366 return Ok(field.into());
371 let field = field.try_into().unwrap();
372 let field_layout = op.layout.field(self, field)?;
373 if field_layout.is_zst() {
374 let immediate = Scalar::zst().into();
375 return Ok(OpTy { op: Operand::Immediate(immediate), layout: field_layout });
377 let offset = op.layout.fields.offset(field);
378 let immediate = match *base {
379 // the field covers the entire type
380 _ if offset.bytes() == 0 && field_layout.size == op.layout.size => *base,
381 // extract fields from types with `ScalarPair` ABI
382 Immediate::ScalarPair(a, b) => {
383 let val = if offset.bytes() == 0 { a } else { b };
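// Illustrative (not from the original source): for a `&str` with `ScalarPair` ABI,
// field 0 (the data pointer) sits at offset 0 and maps to `a`, while field 1 (the
// length) sits at `b_offset` and maps to `b`.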
386 Immediate::Scalar(val) => {
387 bug!("field access on non-aggregate {:#?}, {:#?}", val, op.layout)
390 Ok(OpTy { op: Operand::Immediate(immediate), layout: field_layout })
393 pub fn operand_downcast(
395 op: OpTy<'tcx, M::PointerTag>,
397 ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
398 // Downcasts only change the layout
399 Ok(match op.try_as_mplace(self) {
400 Ok(mplace) => self.mplace_downcast(mplace, variant)?.into(),
402 let layout = op.layout.for_variant(self, variant);
403 OpTy { layout, ..op }
408 pub fn operand_projection(
410 base: OpTy<'tcx, M::PointerTag>,
411 proj_elem: &mir::PlaceElem<'tcx>,
412 ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
413 use rustc::mir::ProjectionElem::*;
414 Ok(match *proj_elem {
415 Field(field, _) => self.operand_field(base, field.index() as u64)?,
416 Downcast(_, variant) => self.operand_downcast(base, variant)?,
417 Deref => self.deref_operand(base)?.into(),
418 Subslice { .. } | ConstantIndex { .. } | Index(_) => {
419 // The rest should only occur as an mplace; we do not use Immediates for types
420 // that allow such operations. This matches `place_projection` forcing an allocation.
421 let mplace = base.assert_mem_place(self);
422 self.mplace_projection(mplace, proj_elem)?.into()
427 /// This is used by [priroda](https://github.com/oli-obk/priroda) to get an OpTy from a local
430 frame: &super::Frame<'mir, 'tcx, M::PointerTag, M::FrameExtra>,
432 layout: Option<TyLayout<'tcx>>,
433 ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
434 assert_ne!(local, mir::RETURN_PLACE);
435 let layout = self.layout_of_local(frame, local, layout)?;
436 let op = if layout.is_zst() {
437 // Do not read from ZSTs; they might not be initialized
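// (E.g. a local of type `()` may never have been written to, so we synthesize the
// immediate here instead of reading it.)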
438 Operand::Immediate(Scalar::zst().into())
440 M::access_local(&self, frame, local)?
442 Ok(OpTy { op, layout })
445 /// Every place can be read from, so we can turn it into an operand
449 place: PlaceTy<'tcx, M::PointerTag>,
450 ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
451 let op = match *place {
452 Place::Ptr(mplace) => Operand::Indirect(mplace),
453 Place::Local { frame, local } => *self.access_local(&self.stack[frame], local, None)?,
455 Ok(OpTy { op, layout: place.layout })
458 // Evaluate a place with the goal of reading from it. This lets us sometimes
459 // avoid allocations.
460 pub fn eval_place_to_op(
462 place: &mir::Place<'tcx>,
463 layout: Option<TyLayout<'tcx>>,
464 ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
465 let base_op = match place.local {
466 mir::RETURN_PLACE => throw_unsup!(ReadFromReturnPointer),
468 // Do not use the layout passed in as argument if the base we are looking at
469 // here is not the entire place.
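// (E.g. for `x.field`, the layout passed in describes `x.field`, not the local `x`
// that we are accessing here.)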
470 let layout = if place.projection.is_empty() { layout } else { None };
472 self.access_local(self.frame(), local, layout)?
479 .try_fold(base_op, |op, elem| self.operand_projection(op, elem))?;
481 trace!("eval_place_to_op: got {:?}", *op);
485 /// Evaluate the operand, producing an `OpTy` from which the data can then be read.
486 /// If you already know the layout, you can save two table lookups
487 /// by passing it in here.
490 mir_op: &mir::Operand<'tcx>,
491 layout: Option<TyLayout<'tcx>>,
492 ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
493 use rustc::mir::Operand::*;
494 let op = match *mir_op {
495 // FIXME: do some more logic on `move` to invalidate the old location
496 Copy(ref place) | Move(ref place) => self.eval_place_to_op(place, layout)?,
498 Constant(ref constant) => {
499 let val = self.subst_from_frame_and_normalize_erasing_regions(constant.literal);
500 self.eval_const_to_op(val, layout)?
503 trace!("{:?}: {:?}", mir_op, *op);
507 /// Evaluate a bunch of operands at once
508 pub(super) fn eval_operands(
510 ops: &[mir::Operand<'tcx>],
511 ) -> InterpResult<'tcx, Vec<OpTy<'tcx, M::PointerTag>>> {
512 ops.into_iter().map(|op| self.eval_operand(op, None)).collect()
515 // Used when the Miri engine runs into a constant, and for extracting information from constants
516 // in patterns via the `const_eval` module
517 /// The `val` and `layout` are assumed to already be in our interpreter
518 /// "universe" (param_env).
519 crate fn eval_const_to_op(
521 val: &'tcx ty::Const<'tcx>,
522 layout: Option<TyLayout<'tcx>>,
523 ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
524 let tag_scalar = |scalar| match scalar {
525 Scalar::Ptr(ptr) => Scalar::Ptr(self.tag_static_base_pointer(ptr)),
526 Scalar::Raw { data, size } => Scalar::Raw { data, size },
528 // Early-return cases.
529 let val_val = match val.val {
530 ty::ConstKind::Param(_) => throw_inval!(TooGeneric),
531 ty::ConstKind::Unevaluated(def_id, substs, promoted) => {
532 let instance = self.resolve(def_id, substs)?;
533 // We use `const_eval` here and `const_eval_raw` elsewhere in mir interpretation.
534 // The reason we use `const_eval_raw` everywhere else is to prevent cycles during
535 // validation, because validation automatically reads through any references, thus
536 // potentially requiring the current static to be evaluated again. This is not a
537 // problem here, because we are building an operand, which means an actual read is happening.
539 return Ok(OpTy::from(self.const_eval(GlobalId { instance, promoted })?));
541 ty::ConstKind::Infer(..)
542 | ty::ConstKind::Bound(..)
543 | ty::ConstKind::Placeholder(..) => {
544 bug!("eval_const_to_op: Unexpected ConstKind {:?}", val)
546 ty::ConstKind::Value(val_val) => {
550 // Other cases need layout.
551 let layout = from_known_layout(layout, || self.layout_of(val.ty))?;
552 let op = match val_val {
553 ConstValue::ByRef { alloc, offset } => {
554 let id = self.tcx.alloc_map.lock().create_memory_alloc(alloc);
555 // We rely on mutability being set correctly in that allocation to prevent writes
556 // where none should happen.
557 let ptr = self.tag_static_base_pointer(Pointer::new(id, offset));
558 Operand::Indirect(MemPlace::from_ptr(ptr, layout.align.abi))
560 ConstValue::Scalar(x) => Operand::Immediate(tag_scalar(x).into()),
561 ConstValue::Slice { data, start, end } => {
562 // We rely on mutability being set correctly in `data` to prevent writes
563 // where none should happen.
564 let ptr = Pointer::new(
565 self.tcx.alloc_map.lock().create_memory_alloc(data),
566 Size::from_bytes(start as u64), // offset: `start`
568 Operand::Immediate(Immediate::new_slice(
569 self.tag_static_base_pointer(ptr).into(),
570 (end - start) as u64, // len: `end - start`
575 Ok(OpTy { op, layout })
578 /// Read discriminant, return the runtime value as well as the variant index.
579 pub fn read_discriminant(
581 rval: OpTy<'tcx, M::PointerTag>,
582 ) -> InterpResult<'tcx, (u128, VariantIdx)> {
583 trace!("read_discriminant_value {:#?}", rval.layout);
585 let (discr_layout, discr_kind, discr_index) = match rval.layout.variants {
586 layout::Variants::Single { index } => {
590 .discriminant_for_variant(*self.tcx, index)
591 .map_or(index.as_u32() as u128, |discr| discr.val);
592 return Ok((discr_val, index));
594 layout::Variants::Multiple {
595 discr: ref discr_layout,
599 } => (discr_layout, discr_kind, discr_index),
602 // read raw discriminant value
603 let discr_op = self.operand_field(rval, discr_index as u64)?;
604 let discr_val = self.read_immediate(discr_op)?;
605 let raw_discr = discr_val.to_scalar_or_undef();
606 trace!("discr value: {:?}", raw_discr);
608 Ok(match *discr_kind {
609 layout::DiscriminantKind::Tag => {
610 let bits_discr = raw_discr
612 .and_then(|raw_discr| self.force_bits(raw_discr, discr_val.layout.size))
613 .map_err(|_| err_ub!(InvalidDiscriminant(raw_discr.erase_tag())))?;
614 let real_discr = if discr_val.layout.ty.is_signed() {
615 // going from layout tag type to typeck discriminant type
616 // requires first sign extending with the discriminant layout
617 let sexted = sign_extend(bits_discr, discr_val.layout.size) as i128;
618 // and then truncating (zeroing the high bits) to the typeck discriminant type's size
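// Illustrative (not from the original source): a `#[repr(i8)]` enum variant with
// discriminant -1 stores the tag byte 0xff; sign extension gives -1 as an i128, and
// truncating to the typeck discriminant type's size recovers the bit pattern that is
// compared against `var.val` below.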
623 .expect("tagged layout corresponds to adt")
626 let size = layout::Integer::from_attr(self, discr_ty).size();
627 let truncatee = sexted as u128;
628 truncate(truncatee, size)
632 // Make sure we catch invalid discriminants
633 let index = match rval.layout.ty.kind {
635 adt.discriminants(self.tcx.tcx).find(|(_, var)| var.val == real_discr)
637 ty::Generator(def_id, substs, _) => {
638 let substs = substs.as_generator();
640 .discriminants(def_id, self.tcx.tcx)
641 .find(|(_, var)| var.val == real_discr)
643 _ => bug!("tagged layout for non-adt non-generator"),
645 .ok_or_else(|| err_ub!(InvalidDiscriminant(raw_discr.erase_tag())))?;
646 (real_discr, index.0)
648 layout::DiscriminantKind::Niche {
653 let variants_start = niche_variants.start().as_u32();
654 let variants_end = niche_variants.end().as_u32();
655 let raw_discr = raw_discr
657 .map_err(|_| err_ub!(InvalidDiscriminant(ScalarMaybeUndef::Undef)))?;
658 match raw_discr.to_bits_or_ptr(discr_val.layout.size, self) {
660 // The niche must be just 0 (which an inbounds pointer value never is)
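// Illustrative (not from the original source): for `Option<&T>`, `None` is encoded as
// the null pointer, so a value that is a genuine (inbounds) pointer must belong to the
// dataful `Some` variant.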
661 let ptr_valid = niche_start == 0
662 && variants_start == variants_end
663 && !self.memory.ptr_may_be_null(ptr);
665 throw_ub!(InvalidDiscriminant(raw_discr.erase_tag().into()))
667 (dataful_variant.as_u32() as u128, dataful_variant)
670 // We need to use machine arithmetic to get the relative variant idx:
671 // variant_index_relative = discr_val - niche_start_val
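// Illustrative (not from the original source): for an `Option<char>`-like layout,
// `niche_start` is 0x110000 (the first invalid `char` value); a tag of 0x110000 gives a
// relative index of 0, selecting `None`, while any valid `char` value falls outside the
// niche range and selects the dataful `Some` variant.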
673 self.layout_of(discr_layout.value.to_int_ty(*self.tcx))?;
674 let discr_val = ImmTy::from_uint(raw_discr, discr_layout);
675 let niche_start_val = ImmTy::from_uint(niche_start, discr_layout);
676 let variant_index_relative_val =
677 self.binary_op(mir::BinOp::Sub, discr_val, niche_start_val)?;
678 let variant_index_relative = variant_index_relative_val
680 .assert_bits(discr_val.layout.size);
681 // Check if this is in the range that indicates an actual discriminant.
682 if variant_index_relative <= u128::from(variants_end - variants_start) {
683 let variant_index_relative = u32::try_from(variant_index_relative)
684 .expect("we checked that this fits into a u32");
685 // Then computing the absolute variant idx should not overflow any more.
686 let variant_index = variants_start
687 .checked_add(variant_index_relative)
688 .expect("overflow computing absolute variant idx")
689 let variants_len = rval
693 .expect("tagged layout for non-adt")
696 assert!((variant_index as usize) < variants_len);
697 (u128::from(variant_index), VariantIdx::from_u32(variant_index))
699 (u128::from(dataful_variant.as_u32()), dataful_variant)