//! Functions concerning immediate values and operands, and reading from operands.
//! All high-level functions to read from memory work on operands as sources.

use std::convert::TryFrom;
use std::fmt::Write;

use rustc_errors::ErrorReported;
use rustc_hir::def::Namespace;
use rustc_macros::HashStable;
use rustc_middle::ty::layout::{PrimitiveExt, TyAndLayout};
use rustc_middle::ty::print::{FmtPrinter, PrettyPrinter, Printer};
use rustc_middle::ty::{ConstInt, Ty};
use rustc_middle::{mir, ty};
use rustc_target::abi::{Abi, HasDataLayout, LayoutOf, Size, TagEncoding};
use rustc_target::abi::{VariantIdx, Variants};

use super::{
    from_known_layout, mir_assign_valid_types, ConstValue, GlobalId, InterpCx, InterpResult,
    MPlaceTy, Machine, MemPlace, Place, PlaceTy, Pointer, Scalar, ScalarMaybeUninit,
};

/// An `Immediate` represents a single immediate self-contained Rust value.
///
/// For optimization of a few very common cases, there is also a representation for a pair of
/// primitive values (`ScalarPair`). It allows Miri to avoid making allocations for checked binary
/// operations and wide pointers. This idea was taken from rustc's codegen.
/// In particular, thanks to `ScalarPair`, arithmetic operations and casts can be entirely
/// defined on `Immediate`, and do not have to work with a `Place`.
#[derive(Copy, Clone, Debug, PartialEq, Eq, HashStable, Hash)]
pub enum Immediate<Tag = ()> {
    Scalar(ScalarMaybeUninit<Tag>),
    ScalarPair(ScalarMaybeUninit<Tag>, ScalarMaybeUninit<Tag>),
}

#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
rustc_data_structures::static_assert_size!(Immediate, 56);
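
// Illustrative note (added, not from the original source): a wide pointer such as
// `&[u8]` fits in a `ScalarPair` as (data pointer, length). `Immediate::new_slice`
// below builds exactly that pair, e.g. a slice of length 3 becomes roughly
// `ScalarPair(ptr.into(), Scalar::from_machine_usize(3, cx).into())`.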

impl<Tag> From<ScalarMaybeUninit<Tag>> for Immediate<Tag> {
    #[inline(always)]
    fn from(val: ScalarMaybeUninit<Tag>) -> Self {
        Immediate::Scalar(val)
    }
}

impl<Tag> From<Scalar<Tag>> for Immediate<Tag> {
    #[inline(always)]
    fn from(val: Scalar<Tag>) -> Self {
        Immediate::Scalar(val.into())
    }
}

impl<Tag> From<Pointer<Tag>> for Immediate<Tag> {
    #[inline(always)]
    fn from(val: Pointer<Tag>) -> Self {
        Immediate::Scalar(Scalar::from(val).into())
    }
}

impl<'tcx, Tag> Immediate<Tag> {
    pub fn new_slice(val: Scalar<Tag>, len: u64, cx: &impl HasDataLayout) -> Self {
        Immediate::ScalarPair(val.into(), Scalar::from_machine_usize(len, cx).into())
    }

    pub fn new_dyn_trait(val: Scalar<Tag>, vtable: Pointer<Tag>) -> Self {
        Immediate::ScalarPair(val.into(), vtable.into())
    }

    #[inline]
    pub fn to_scalar_or_uninit(self) -> ScalarMaybeUninit<Tag> {
        match self {
            Immediate::Scalar(val) => val,
            Immediate::ScalarPair(..) => bug!("Got a wide pointer where a scalar was expected"),
        }
    }

    #[inline]
    pub fn to_scalar(self) -> InterpResult<'tcx, Scalar<Tag>> {
        self.to_scalar_or_uninit().check_init()
    }
}

// ScalarPair needs a type to interpret, so we often have an immediate and a type together
// as input for binary and cast operations.
#[derive(Copy, Clone, Debug)]
pub struct ImmTy<'tcx, Tag = ()> {
    imm: Immediate<Tag>,
    pub layout: TyAndLayout<'tcx>,
}

#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
rustc_data_structures::static_assert_size!(ImmTy<'_>, 72);

impl<Tag: Copy> std::fmt::Display for ImmTy<'tcx, Tag> {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        /// Helper function for printing a scalar to a FmtPrinter
        fn p<'a, 'tcx, F: std::fmt::Write, Tag>(
            cx: FmtPrinter<'a, 'tcx, F>,
            s: ScalarMaybeUninit<Tag>,
            ty: Ty<'tcx>,
        ) -> Result<FmtPrinter<'a, 'tcx, F>, std::fmt::Error> {
            match s {
                ScalarMaybeUninit::Scalar(s) => {
                    cx.pretty_print_const_scalar(s.erase_tag(), ty, true)
                }
                ScalarMaybeUninit::Uninit => cx.typed_value(
                    |mut this| {
                        this.write_str("uninit ")?;
                        Ok(this)
                    },
                    |this| this.print_type(ty),
                    " ",
                ),
            }
        }
        ty::tls::with(|tcx| {
            match self.imm {
                Immediate::Scalar(s) => {
                    if let Some(ty) = tcx.lift(self.layout.ty) {
                        let cx = FmtPrinter::new(tcx, f, Namespace::ValueNS);
                        p(cx, s, ty)?;
                        return Ok(());
                    }
                    write!(f, "{}: {}", s.erase_tag(), self.layout.ty)
                }
                Immediate::ScalarPair(a, b) => {
                    // FIXME(oli-obk): at least print tuples and slices nicely
                    write!(f, "({}, {}): {}", a.erase_tag(), b.erase_tag(), self.layout.ty,)
                }
            }
        })
    }
}

impl<'tcx, Tag> std::ops::Deref for ImmTy<'tcx, Tag> {
    type Target = Immediate<Tag>;
    #[inline(always)]
    fn deref(&self) -> &Immediate<Tag> {
        &self.imm
    }
}

/// An `Operand` is the result of computing a `mir::Operand`. It can be immediate,
/// or still in memory. The latter is an optimization, to delay reading that chunk of
/// memory and to avoid having to store arbitrary-sized data here.
#[derive(Copy, Clone, Debug, PartialEq, Eq, HashStable, Hash)]
pub enum Operand<Tag = ()> {
    Immediate(Immediate<Tag>),
    Indirect(MemPlace<Tag>),
}
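
// Illustrative note (added): evaluating a small constant such as `42_u8` typically
// yields `Operand::Immediate`, while an operand that still lives in interpreter memory
// (e.g. a local whose address has been taken) is represented as `Operand::Indirect`,
// pointing at a `MemPlace`.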

#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
pub struct OpTy<'tcx, Tag = ()> {
    op: Operand<Tag>, // Keep this private; it helps enforce invariants.
    pub layout: TyAndLayout<'tcx>,
}

#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
rustc_data_structures::static_assert_size!(OpTy<'_, ()>, 80);

impl<'tcx, Tag> std::ops::Deref for OpTy<'tcx, Tag> {
    type Target = Operand<Tag>;
    #[inline(always)]
    fn deref(&self) -> &Operand<Tag> {
        &self.op
    }
}

impl<'tcx, Tag: Copy> From<MPlaceTy<'tcx, Tag>> for OpTy<'tcx, Tag> {
    #[inline(always)]
    fn from(mplace: MPlaceTy<'tcx, Tag>) -> Self {
        OpTy { op: Operand::Indirect(*mplace), layout: mplace.layout }
    }
}

impl<'tcx, Tag: Copy> From<&'_ MPlaceTy<'tcx, Tag>> for OpTy<'tcx, Tag> {
    #[inline(always)]
    fn from(mplace: &MPlaceTy<'tcx, Tag>) -> Self {
        OpTy { op: Operand::Indirect(**mplace), layout: mplace.layout }
    }
}

impl<'tcx, Tag> From<ImmTy<'tcx, Tag>> for OpTy<'tcx, Tag> {
    #[inline(always)]
    fn from(val: ImmTy<'tcx, Tag>) -> Self {
        OpTy { op: Operand::Immediate(val.imm), layout: val.layout }
    }
}

impl<'tcx, Tag: Copy> ImmTy<'tcx, Tag> {
    #[inline]
    pub fn from_scalar(val: Scalar<Tag>, layout: TyAndLayout<'tcx>) -> Self {
        ImmTy { imm: val.into(), layout }
    }

    #[inline]
    pub fn from_immediate(imm: Immediate<Tag>, layout: TyAndLayout<'tcx>) -> Self {
        ImmTy { imm, layout }
    }

    #[inline]
    pub fn try_from_uint(i: impl Into<u128>, layout: TyAndLayout<'tcx>) -> Option<Self> {
        Some(Self::from_scalar(Scalar::try_from_uint(i, layout.size)?, layout))
    }
    #[inline]
    pub fn from_uint(i: impl Into<u128>, layout: TyAndLayout<'tcx>) -> Self {
        Self::from_scalar(Scalar::from_uint(i, layout.size), layout)
    }

    #[inline]
    pub fn try_from_int(i: impl Into<i128>, layout: TyAndLayout<'tcx>) -> Option<Self> {
        Some(Self::from_scalar(Scalar::try_from_int(i, layout.size)?, layout))
    }

    #[inline]
    pub fn from_int(i: impl Into<i128>, layout: TyAndLayout<'tcx>) -> Self {
        Self::from_scalar(Scalar::from_int(i, layout.size), layout)
    }

    #[inline]
    pub fn to_const_int(self) -> ConstInt {
        assert!(self.layout.ty.is_integral());
        let int = self.to_scalar().expect("to_const_int doesn't work on scalar pairs").assert_int();
        ConstInt::new(int, self.layout.ty.is_signed(), self.layout.ty.is_ptr_sized_integral())
    }
}
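
// Example (illustrative sketch, not part of the original file): with a `u8` layout,
// `ImmTy::from_uint(5u8, layout)` stores the bits 0x05 at size 1, while
// `ImmTy::try_from_uint(300u16, layout)` returns `None` because 300 does not fit
// into the layout's size.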

impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
    /// Normalize `place.ptr` to a `Pointer` if this is a place and not a ZST.
    /// Can be helpful to avoid lots of `force_ptr` calls later, if this place is used a lot.
    #[inline]
    pub fn force_op_ptr(
        &self,
        op: &OpTy<'tcx, M::PointerTag>,
    ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
        match op.try_as_mplace(self) {
            Ok(mplace) => Ok(self.force_mplace_ptr(mplace)?.into()),
            Err(imm) => Ok(imm.into()), // Nothing to cast/force
        }
    }

    /// Try reading an immediate in memory; this is interesting particularly for `ScalarPair`.
    /// Returns `None` if the layout does not permit loading this as a value.
    fn try_read_immediate_from_mplace(
        &self,
        mplace: &MPlaceTy<'tcx, M::PointerTag>,
    ) -> InterpResult<'tcx, Option<ImmTy<'tcx, M::PointerTag>>> {
        if mplace.layout.is_unsized() {
            // Don't touch unsized
            return Ok(None);
        }

        let ptr = match self
            .check_mplace_access(mplace, None)
            .expect("places should be checked on creation")
        {
            Some(ptr) => ptr,
            None => {
                if let Scalar::Ptr(ptr) = mplace.ptr {
                    // We may be reading from a static.
                    // In order to ensure that `static FOO: Type = FOO;` causes a cycle error
                    // instead of magically pulling *any* ZST value from the ether, we need to
                    // actually access the referenced allocation.
                    self.memory.get_raw(ptr.alloc_id)?;
                }
                return Ok(Some(ImmTy {
                    // zero-sized type
                    imm: Scalar::ZST.into(),
                    layout: mplace.layout,
                }));
            }
        };

        let alloc = self.memory.get_raw(ptr.alloc_id)?;

        match mplace.layout.abi {
            Abi::Scalar(..) => {
                let scalar = alloc.read_scalar(self, ptr, mplace.layout.size)?;
                Ok(Some(ImmTy { imm: scalar.into(), layout: mplace.layout }))
            }
            Abi::ScalarPair(ref a, ref b) => {
                // We checked `ptr_align` above, so all fields will have the alignment they need.
                // We would anyway check against `ptr_align.restrict_for_offset(b_offset)`,
                // which `ptr.offset(b_offset)` cannot possibly fail to satisfy.
                let (a, b) = (&a.value, &b.value);
                let (a_size, b_size) = (a.size(self), b.size(self));
                let a_ptr = ptr;
                let b_offset = a_size.align_to(b.align(self).abi);
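                // Illustrative example (added note): for a `ScalarPair(u8, u32)`,
                // `a_size` is 1 byte and `b` is 4-byte aligned, so
                // `b_offset = Size::from_bytes(1).align_to(4-byte align)` = 4 bytes.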
                assert!(b_offset.bytes() > 0); // we later use the offset to tell apart the fields
                let b_ptr = ptr.offset(b_offset, self)?;
                let a_val = alloc.read_scalar(self, a_ptr, a_size)?;
                let b_val = alloc.read_scalar(self, b_ptr, b_size)?;
                Ok(Some(ImmTy { imm: Immediate::ScalarPair(a_val, b_val), layout: mplace.layout }))
            }
            _ => Ok(None),
        }
    }

    /// Try returning an immediate for the operand.
    /// If the layout does not permit loading this as an immediate, return where in memory
    /// we can find the data.
    /// Note that for a given layout, this operation will either always fail or always
    /// succeed! Whether it succeeds depends on whether the layout can be represented
    /// in an `Immediate`, not on which data is stored there currently.
    pub(crate) fn try_read_immediate(
        &self,
        src: &OpTy<'tcx, M::PointerTag>,
    ) -> InterpResult<'tcx, Result<ImmTy<'tcx, M::PointerTag>, MPlaceTy<'tcx, M::PointerTag>>> {
        Ok(match src.try_as_mplace(self) {
            Ok(ref mplace) => {
                if let Some(val) = self.try_read_immediate_from_mplace(mplace)? {
                    Ok(val)
                } else {
                    Err(*mplace)
                }
            }
            Err(val) => Ok(val),
        })
    }

    /// Read an immediate from a place, asserting that this is possible with the given layout.
    #[inline(always)]
    pub fn read_immediate(
        &self,
        op: &OpTy<'tcx, M::PointerTag>,
    ) -> InterpResult<'tcx, ImmTy<'tcx, M::PointerTag>> {
        if let Ok(imm) = self.try_read_immediate(op)? {
            Ok(imm)
        } else {
            span_bug!(self.cur_span(), "primitive read failed for type: {:?}", op.layout.ty);
        }
    }

    /// Read a scalar from a place
    pub fn read_scalar(
        &self,
        op: &OpTy<'tcx, M::PointerTag>,
    ) -> InterpResult<'tcx, ScalarMaybeUninit<M::PointerTag>> {
        Ok(self.read_immediate(op)?.to_scalar_or_uninit())
    }

    /// Turn the wide MPlace into a string (must already be dereferenced!)
    pub fn read_str(&self, mplace: &MPlaceTy<'tcx, M::PointerTag>) -> InterpResult<'tcx, &str> {
        let len = mplace.len(self)?;
        let bytes = self.memory.read_bytes(mplace.ptr, Size::from_bytes(len))?;
        let str = std::str::from_utf8(bytes).map_err(|err| err_ub!(InvalidStr(err)))?;
        Ok(str)
    }

    /// Projection functions
    pub fn operand_field(
        &self,
        op: &OpTy<'tcx, M::PointerTag>,
        field: usize,
    ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
        let base = match op.try_as_mplace(self) {
            Ok(ref mplace) => {
                // We can reuse the mplace field computation logic for indirect operands.
                let field = self.mplace_field(mplace, field)?;
                return Ok(field.into());
            }
            Err(value) => value,
        };

        let field_layout = op.layout.field(self, field)?;
        if field_layout.is_zst() {
            let immediate = Scalar::ZST.into();
            return Ok(OpTy { op: Operand::Immediate(immediate), layout: field_layout });
        }
        let offset = op.layout.fields.offset(field);
        let immediate = match *base {
            // the field covers the entire type
            _ if offset.bytes() == 0 && field_layout.size == op.layout.size => *base,
            // extract fields from types with `ScalarPair` ABI
            Immediate::ScalarPair(a, b) => {
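                // Illustrative note (added): e.g. for a wide pointer `&[u8]` with
                // `ScalarPair` ABI, field 0 (the data pointer) sits at offset 0 and
                // picks `a`, while the length field picks `b`.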
                let val = if offset.bytes() == 0 { a } else { b };
                Immediate::from(val)
            }
            Immediate::Scalar(val) => span_bug!(
                self.cur_span(),
                "field access on non aggregate {:#?}, {:#?}",
                val,
                op.layout
            ),
        };
        Ok(OpTy { op: Operand::Immediate(immediate), layout: field_layout })
    }

    pub fn operand_index(
        &self,
        op: &OpTy<'tcx, M::PointerTag>,
        index: u64,
    ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
        if let Ok(index) = usize::try_from(index) {
            // We can just treat this as a field.
            self.operand_field(op, index)
        } else {
            // Indexing into a big array. This must be an mplace.
            let mplace = op.assert_mem_place(self);
            Ok(self.mplace_index(&mplace, index)?.into())
        }
    }

    pub fn operand_downcast(
        &self,
        op: &OpTy<'tcx, M::PointerTag>,
        variant: VariantIdx,
    ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
        // Downcasts only change the layout
        Ok(match op.try_as_mplace(self) {
            Ok(ref mplace) => self.mplace_downcast(mplace, variant)?.into(),
            Err(..) => {
                let layout = op.layout.for_variant(self, variant);
                OpTy { layout, ..*op }
            }
        })
    }

    pub fn operand_projection(
        &self,
        base: &OpTy<'tcx, M::PointerTag>,
        proj_elem: mir::PlaceElem<'tcx>,
    ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
        use rustc_middle::mir::ProjectionElem::*;
        Ok(match proj_elem {
            Field(field, _) => self.operand_field(base, field.index())?,
            Downcast(_, variant) => self.operand_downcast(base, variant)?,
            Deref => self.deref_operand(base)?.into(),
            Subslice { .. } | ConstantIndex { .. } | Index(_) => {
                // The rest should only occur as mplace, we do not use Immediates for types
                // allowing such operations. This matches place_projection forcing an allocation.
                let mplace = base.assert_mem_place(self);
                self.mplace_projection(&mplace, proj_elem)?.into()
            }
        })
    }

    /// Read from a local. Will not actually access the local if reading from a ZST.
    /// Never reads from memory; if the local lives in memory, an indirect `Operand`
    /// referring to it is returned instead.
    ///
    /// This is public because it is used by [priroda](https://github.com/oli-obk/priroda) to get an
    /// OpTy from a local.
    pub fn access_local(
        &self,
        frame: &super::Frame<'mir, 'tcx, M::PointerTag, M::FrameExtra>,
        local: mir::Local,
        layout: Option<TyAndLayout<'tcx>>,
    ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
        let layout = self.layout_of_local(frame, local, layout)?;
        let op = if layout.is_zst() {
            // Do not read from ZST, they might not be initialized
            Operand::Immediate(Scalar::ZST.into())
        } else {
            M::access_local(&self, frame, local)?
        };
        Ok(OpTy { op, layout })
    }

    /// Every place can be read from, so we can turn them into an operand.
    /// This will definitely return `Indirect` if the place is a `Ptr`, i.e., this
    /// will never actually read from memory.
    #[inline(always)]
    pub fn place_to_op(
        &self,
        place: &PlaceTy<'tcx, M::PointerTag>,
    ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
        let op = match **place {
            Place::Ptr(mplace) => Operand::Indirect(mplace),
            Place::Local { frame, local } => {
                *self.access_local(&self.stack()[frame], local, None)?
            }
        };
        Ok(OpTy { op, layout: place.layout })
    }

    // Evaluate a place with the goal of reading from it. This lets us sometimes
    // avoid allocations.
    pub fn eval_place_to_op(
        &self,
        place: mir::Place<'tcx>,
        layout: Option<TyAndLayout<'tcx>>,
    ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
        // Do not use the layout passed in as argument if the base we are looking at
        // here is not the entire place.
        let layout = if place.projection.is_empty() { layout } else { None };

        let base_op = self.access_local(self.frame(), place.local, layout)?;

        let op = place
            .projection
            .iter()
            .try_fold(base_op, |op, elem| self.operand_projection(&op, elem))?;

        trace!("eval_place_to_op: got {:?}", *op);
        // Sanity-check the type we ended up with.
        debug_assert!(mir_assign_valid_types(
            *self.tcx,
            self.param_env,
            self.layout_of(self.subst_from_current_frame_and_normalize_erasing_regions(
                place.ty(&self.frame().body.local_decls, *self.tcx).ty
            ))?,
            op.layout,
        ));
        Ok(op)
    }

    /// Evaluate the operand, returning a place where you can then find the data.
    /// If you already know the layout, you can save two table lookups
    /// by passing it in here.
    #[inline]
    pub fn eval_operand(
        &self,
        mir_op: &mir::Operand<'tcx>,
        layout: Option<TyAndLayout<'tcx>>,
    ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
        use rustc_middle::mir::Operand::*;
        let op = match *mir_op {
            // FIXME: do some more logic on `move` to invalidate the old location
            Copy(place) | Move(place) => self.eval_place_to_op(place, layout)?,

            Constant(ref constant) => {
                let val =
                    self.subst_from_current_frame_and_normalize_erasing_regions(constant.literal);
                // This can still fail:
                // * During ConstProp, with `TooGeneric` or since the `required_consts` were not all
                //   checked yet.
                // * During CTFE, since promoteds in `const`/`static` initializer bodies can fail.
                self.mir_const_to_op(&val, layout)?
            }
        };
        trace!("{:?}: {:?}", mir_op, *op);
        Ok(op)
    }

    /// Evaluate a bunch of operands at once
    pub(super) fn eval_operands(
        &self,
        ops: &[mir::Operand<'tcx>],
    ) -> InterpResult<'tcx, Vec<OpTy<'tcx, M::PointerTag>>> {
        ops.iter().map(|op| self.eval_operand(op, None)).collect()
    }

    // Used when the miri-engine runs into a constant and for extracting information from constants
    // in patterns via the `const_eval` module
    /// The `val` and `layout` are assumed to already be in our interpreter
    /// "universe" (param_env).
    crate fn const_to_op(
        &self,
        val: &ty::Const<'tcx>,
        layout: Option<TyAndLayout<'tcx>>,
    ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
        match val.val {
            ty::ConstKind::Param(_) | ty::ConstKind::Bound(..) => throw_inval!(TooGeneric),
            ty::ConstKind::Error(_) => throw_inval!(AlreadyReported(ErrorReported)),
            ty::ConstKind::Unevaluated(ty::Unevaluated { def, substs, promoted }) => {
                let instance = self.resolve(def, substs)?;
                Ok(self.eval_to_allocation(GlobalId { instance, promoted })?.into())
            }
            ty::ConstKind::Infer(..) | ty::ConstKind::Placeholder(..) => {
                span_bug!(self.cur_span(), "const_to_op: Unexpected ConstKind {:?}", val)
            }
            ty::ConstKind::Value(val_val) => self.const_val_to_op(val_val, val.ty, layout),
        }
    }

    crate fn mir_const_to_op(
        &self,
        val: &mir::ConstantKind<'tcx>,
        layout: Option<TyAndLayout<'tcx>>,
    ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
        match val {
            mir::ConstantKind::Ty(ct) => self.const_to_op(ct, layout),
            mir::ConstantKind::Val(val, ty) => self.const_val_to_op(*val, ty, layout),
        }
    }

    crate fn const_val_to_op(
        &self,
        val_val: ConstValue<'tcx>,
        ty: Ty<'tcx>,
        layout: Option<TyAndLayout<'tcx>>,
    ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
        // Other cases need layout.
        let tag_scalar = |scalar| -> InterpResult<'tcx, _> {
            Ok(match scalar {
                Scalar::Ptr(ptr) => Scalar::Ptr(self.global_base_pointer(ptr)?),
                Scalar::Int(int) => Scalar::Int(int),
            })
        };
        let layout = from_known_layout(self.tcx, self.param_env, layout, || self.layout_of(ty))?;
        let op = match val_val {
            ConstValue::ByRef { alloc, offset } => {
                let id = self.tcx.create_memory_alloc(alloc);
                // We rely on mutability being set correctly in that allocation to prevent writes
                // where none should happen.
                let ptr = self.global_base_pointer(Pointer::new(id, offset))?;
                Operand::Indirect(MemPlace::from_ptr(ptr, layout.align.abi))
            }
            ConstValue::Scalar(x) => Operand::Immediate(tag_scalar(x)?.into()),
            ConstValue::Slice { data, start, end } => {
                // We rely on mutability being set correctly in `data` to prevent writes
                // where none should happen.
                let ptr = Pointer::new(
                    self.tcx.create_memory_alloc(data),
                    Size::from_bytes(start), // offset: `start`
                );
                Operand::Immediate(Immediate::new_slice(
                    self.global_base_pointer(ptr)?.into(),
                    u64::try_from(end.checked_sub(start).unwrap()).unwrap(), // len: `end - start`
                    self,
                ))
            }
        };
        Ok(OpTy { op, layout })
    }

    /// Read discriminant, return the runtime value as well as the variant index.
    pub fn read_discriminant(
        &self,
        op: &OpTy<'tcx, M::PointerTag>,
    ) -> InterpResult<'tcx, (Scalar<M::PointerTag>, VariantIdx)> {
        trace!("read_discriminant_value {:#?}", op.layout);
        // Get type and layout of the discriminant.
        let discr_layout = self.layout_of(op.layout.ty.discriminant_ty(*self.tcx))?;
        trace!("discriminant type: {:?}", discr_layout.ty);

        // We use "discriminant" to refer to the value associated with a particular enum variant.
        // This is not to be confused with its "variant index", which just determines its position
        // in the declared list of variants -- they can differ with explicitly assigned
        // discriminants.
        // We use "tag" to refer to how the discriminant is encoded in memory, which can be either
        // straightforward (`TagEncoding::Direct`) or with a niche (`TagEncoding::Niche`).
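        // For example (illustrative): in `enum Foo { A = 10, B, C = 30 }`, the variant
        // indices are 0, 1, 2 while the discriminants are 10, 11, 30; the tag stored in
        // memory encodes the discriminant either directly or via a niche.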
        let (tag_scalar_layout, tag_encoding, tag_field) = match op.layout.variants {
            Variants::Single { index } => {
                let discr = match op.layout.ty.discriminant_for_variant(*self.tcx, index) {
                    Some(discr) => {
                        // This type actually has discriminants.
                        assert_eq!(discr.ty, discr_layout.ty);
                        Scalar::from_uint(discr.val, discr_layout.size)
                    }
                    None => {
                        // On a type without actual discriminants, variant is 0.
                        assert_eq!(index.as_u32(), 0);
                        Scalar::from_uint(index.as_u32(), discr_layout.size)
                    }
                };
                return Ok((discr, index));
            }
            Variants::Multiple { ref tag, ref tag_encoding, tag_field, .. } => {
                (tag, tag_encoding, tag_field)
            }
        };

        // There are *three* layouts that come into play here:
        // - The discriminant has a type for typechecking. This is `discr_layout`, and is used for
        //   the `Scalar` we return.
        // - The tag (encoded discriminant) has layout `tag_layout`. This is always an integer
        //   type, and is used to interpret the value we read from the tag field.
        //   For the return value, a cast to `discr_layout` is performed.
        // - The field storing the tag has a layout, which is very similar to `tag_layout` but
        //   may be a pointer. This is `tag_val.layout`; we just use it for sanity checks.
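        // Illustrative note (added): for `Option<&T>` the tag is stored in the pointer
        // field, so `tag_val.layout` is a pointer layout while `tag_layout` is the
        // integer type of the same size.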

        // Get layout for tag.
        let tag_layout = self.layout_of(tag_scalar_layout.value.to_int_ty(*self.tcx))?;

        // Read tag and sanity-check `tag_layout`.
        let tag_val = self.read_immediate(&self.operand_field(op, tag_field)?)?;
        assert_eq!(tag_layout.size, tag_val.layout.size);
        assert_eq!(tag_layout.abi.is_signed(), tag_val.layout.abi.is_signed());
        let tag_val = tag_val.to_scalar()?;
        trace!("tag value: {:?}", tag_val);
673 Ok(match *tag_encoding {
674 TagEncoding::Direct => {
676 .force_bits(tag_val, tag_layout.size)
677 .map_err(|_| err_ub!(InvalidTag(tag_val.erase_tag())))?;
678 // Cast bits from tag layout to discriminant layout.
679 let discr_val = self.cast_from_scalar(tag_bits, tag_layout, discr_layout.ty);
680 let discr_bits = discr_val.assert_bits(discr_layout.size);
681 // Convert discriminant to variant index, and catch invalid discriminants.
682 let index = match *op.layout.ty.kind() {
684 adt.discriminants(*self.tcx).find(|(_, var)| var.val == discr_bits)
686 ty::Generator(def_id, substs, _) => {
687 let substs = substs.as_generator();
689 .discriminants(def_id, *self.tcx)
690 .find(|(_, var)| var.val == discr_bits)
692 _ => span_bug!(self.cur_span(), "tagged layout for non-adt non-generator"),
694 .ok_or_else(|| err_ub!(InvalidTag(tag_val.erase_tag())))?;
695 // Return the cast value, and the index.
            TagEncoding::Niche { dataful_variant, ref niche_variants, niche_start } => {
                // Compute the variant this niche value/"tag" corresponds to. With niche layout,
                // discriminant (encoded in niche/tag) and variant index are the same.
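                // Illustrative example (added): for `Option<bool>` the niche lives in the
                // `bool` (valid values 0 and 1), `niche_start` is 2 and the niche variants
                // cover only `None`, so a tag of 2 decodes to relative index 0 (`None`),
                // while tags 0 and 1 fall outside the niche and map to the dataful `Some`.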
                let variants_start = niche_variants.start().as_u32();
                let variants_end = niche_variants.end().as_u32();
                let variant = match tag_val.to_bits_or_ptr(tag_layout.size, self) {
                    Err(ptr) => {
                        // The niche must be just 0 (which an inbounds pointer value never is)
                        let ptr_valid = niche_start == 0
                            && variants_start == variants_end
                            && !self.memory.ptr_may_be_null(ptr);
                        if !ptr_valid {
                            throw_ub!(InvalidTag(tag_val.erase_tag()))
                        }
                        dataful_variant
                    }
                    Ok(tag_bits) => {
                        // We need to use machine arithmetic to get the relative variant idx:
                        // variant_index_relative = tag_val - niche_start_val
                        let tag_val = ImmTy::from_uint(tag_bits, tag_layout);
                        let niche_start_val = ImmTy::from_uint(niche_start, tag_layout);
                        let variant_index_relative_val =
                            self.binary_op(mir::BinOp::Sub, &tag_val, &niche_start_val)?;
                        let variant_index_relative = variant_index_relative_val
                            .to_scalar()?
                            .assert_bits(tag_val.layout.size);
                        // Check if this is in the range that indicates an actual discriminant.
                        if variant_index_relative <= u128::from(variants_end - variants_start) {
                            let variant_index_relative = u32::try_from(variant_index_relative)
                                .expect("we checked that this fits into a u32");
                            // Then computing the absolute variant idx should not overflow any more.
                            let variant_index = variants_start
                                .checked_add(variant_index_relative)
                                .expect("overflow computing absolute variant idx");
                            let variants_len = op
                                .layout
                                .ty
                                .ty_adt_def()
                                .expect("tagged layout for non adt")
                                .variants
                                .len();
                            assert!(usize::try_from(variant_index).unwrap() < variants_len);
                            VariantIdx::from_u32(variant_index)
                        } else {
                            dataful_variant
                        }
                    }
                };
                // Compute the size of the scalar we need to return.
                // No need to cast, because the variant index directly serves as discriminant and is
                // encoded in the tag.
                (Scalar::from_uint(variant.as_u32(), discr_layout.size), variant)
            }
        })
    }
}