//! Functions concerning immediate values and operands, and reading from operands.
//! All high-level functions to read from memory work on operands as sources.
use std::convert::TryFrom;
use rustc_errors::ErrorReported;
use rustc_hir::def::Namespace;
use rustc_macros::HashStable;
use rustc_middle::ty::layout::{LayoutOf, PrimitiveExt, TyAndLayout};
use rustc_middle::ty::print::{FmtPrinter, PrettyPrinter, Printer};
use rustc_middle::ty::{ConstInt, Ty};
use rustc_middle::{mir, ty};
use rustc_target::abi::{Abi, HasDataLayout, Size, TagEncoding};
use rustc_target::abi::{VariantIdx, Variants};
use super::{
    alloc_range, from_known_layout, mir_assign_valid_types, AllocId, ConstValue, GlobalId,
    InterpCx, InterpResult, MPlaceTy, Machine, MemPlace, Place, PlaceTy, Pointer, Provenance,
    Scalar, ScalarMaybeUninit,
};
/// An `Immediate` represents a single immediate self-contained Rust value.
/// For optimization of a few very common cases, there is also a representation for a pair of
/// primitive values (`ScalarPair`). It allows Miri to avoid making allocations for checked binary
/// operations and wide pointers. This idea was taken from rustc's codegen.
/// In particular, thanks to `ScalarPair`, arithmetic operations and casts can be entirely
/// defined on `Immediate`, and do not have to work with a `Place`.
#[derive(Copy, Clone, PartialEq, Eq, HashStable, Hash, Debug)]
pub enum Immediate<Tag: Provenance = AllocId> {
    Scalar(ScalarMaybeUninit<Tag>),
    ScalarPair(ScalarMaybeUninit<Tag>, ScalarMaybeUninit<Tag>),
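    // Sketch of what lands in each variant (illustrative only): a `u32` or a thin reference is a
    // `Scalar`; a wide pointer such as `&[u8]` (data pointer + length) or the result of a checked
    // binary operation (value + overflow flag) is a `ScalarPair`.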
#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
rustc_data_structures::static_assert_size!(Immediate, 56);
impl<Tag: Provenance> From<ScalarMaybeUninit<Tag>> for Immediate<Tag> {
    fn from(val: ScalarMaybeUninit<Tag>) -> Self {
        Immediate::Scalar(val)
impl<Tag: Provenance> From<Scalar<Tag>> for Immediate<Tag> {
    fn from(val: Scalar<Tag>) -> Self {
        Immediate::Scalar(val.into())
impl<'tcx, Tag: Provenance> Immediate<Tag> {
    pub fn from_pointer(p: Pointer<Tag>, cx: &impl HasDataLayout) -> Self {
        Immediate::Scalar(ScalarMaybeUninit::from_pointer(p, cx))
    pub fn from_maybe_pointer(p: Pointer<Option<Tag>>, cx: &impl HasDataLayout) -> Self {
        Immediate::Scalar(ScalarMaybeUninit::from_maybe_pointer(p, cx))
    pub fn new_slice(val: Scalar<Tag>, len: u64, cx: &impl HasDataLayout) -> Self {
        Immediate::ScalarPair(val.into(), Scalar::from_machine_usize(len, cx).into())
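        // Hypothetical use (sketch): for a `&[u8]` of length 3 whose data pointer is some
        // `ptr: Pointer<Tag>`, `Immediate::new_slice(Scalar::from_pointer(ptr, cx), 3, cx)`
        // yields the (pointer, length) pair without touching interpreter memory.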
        vtable: Pointer<Option<Tag>>,
        cx: &impl HasDataLayout,
        Immediate::ScalarPair(val.into(), ScalarMaybeUninit::from_maybe_pointer(vtable, cx))
    pub fn to_scalar_or_uninit(self) -> ScalarMaybeUninit<Tag> {
            Immediate::Scalar(val) => val,
            Immediate::ScalarPair(..) => bug!("Got a scalar pair where a scalar was expected"),
    pub fn to_scalar(self) -> InterpResult<'tcx, Scalar<Tag>> {
        self.to_scalar_or_uninit().check_init()
    pub fn to_scalar_pair(self) -> InterpResult<'tcx, (Scalar<Tag>, Scalar<Tag>)> {
            Immediate::ScalarPair(val1, val2) => Ok((val1.check_init()?, val2.check_init()?)),
            Immediate::Scalar(..) => {
                bug!("Got a scalar where a scalar pair was expected")
// ScalarPair needs a type to interpret, so we often have an immediate and a type together
// as input for binary and cast operations.
#[derive(Copy, Clone, Debug)]
pub struct ImmTy<'tcx, Tag: Provenance = AllocId> {
    pub layout: TyAndLayout<'tcx>,
#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
rustc_data_structures::static_assert_size!(ImmTy<'_>, 72);
impl<Tag: Provenance> std::fmt::Display for ImmTy<'tcx, Tag> {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        /// Helper function for printing a scalar to a FmtPrinter
        fn p<'a, 'tcx, F: std::fmt::Write, Tag: Provenance>(
            cx: FmtPrinter<'a, 'tcx, F>,
            s: ScalarMaybeUninit<Tag>,
        ) -> Result<FmtPrinter<'a, 'tcx, F>, std::fmt::Error> {
                ScalarMaybeUninit::Scalar(Scalar::Int(int)) => {
                    cx.pretty_print_const_scalar_int(int, ty, true)
                ScalarMaybeUninit::Scalar(Scalar::Ptr(ptr, _sz)) => {
                    // Just print the ptr value. `pretty_print_const_scalar_ptr` would also try to
                    // print what it points to, which would fail since it has no access to the local
                    // memory.
                    cx.pretty_print_const_pointer(ptr, ty, true)
                ScalarMaybeUninit::Uninit => cx.typed_value(
                        this.write_str("uninit ")?;
                    |this| this.print_type(ty),
        ty::tls::with(|tcx| {
                Immediate::Scalar(s) => {
                    if let Some(ty) = tcx.lift(self.layout.ty) {
                        let cx = FmtPrinter::new(tcx, f, Namespace::ValueNS);
                    write!(f, "{}: {}", s, self.layout.ty)
                Immediate::ScalarPair(a, b) => {
                    // FIXME(oli-obk): at least print tuples and slices nicely
                    write!(f, "({}, {}): {}", a, b, self.layout.ty,)
impl<'tcx, Tag: Provenance> std::ops::Deref for ImmTy<'tcx, Tag> {
    type Target = Immediate<Tag>;
    fn deref(&self) -> &Immediate<Tag> {
/// An `Operand` is the result of computing a `mir::Operand`. It can be immediate,
/// or still in memory. The latter is an optimization, to delay reading that chunk of
/// memory and to avoid having to store arbitrary-sized data here.
#[derive(Copy, Clone, PartialEq, Eq, HashStable, Hash, Debug)]
pub enum Operand<Tag: Provenance = AllocId> {
    Immediate(Immediate<Tag>),
    Indirect(MemPlace<Tag>),
#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
pub struct OpTy<'tcx, Tag: Provenance = AllocId> {
    op: Operand<Tag>, // Keep this private; it helps enforce invariants.
    pub layout: TyAndLayout<'tcx>,
#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
rustc_data_structures::static_assert_size!(OpTy<'_>, 80);
impl<'tcx, Tag: Provenance> std::ops::Deref for OpTy<'tcx, Tag> {
    type Target = Operand<Tag>;
    fn deref(&self) -> &Operand<Tag> {
impl<'tcx, Tag: Provenance> From<MPlaceTy<'tcx, Tag>> for OpTy<'tcx, Tag> {
    fn from(mplace: MPlaceTy<'tcx, Tag>) -> Self {
        OpTy { op: Operand::Indirect(*mplace), layout: mplace.layout }
impl<'tcx, Tag: Provenance> From<&'_ MPlaceTy<'tcx, Tag>> for OpTy<'tcx, Tag> {
    fn from(mplace: &MPlaceTy<'tcx, Tag>) -> Self {
        OpTy { op: Operand::Indirect(**mplace), layout: mplace.layout }
impl<'tcx, Tag: Provenance> From<ImmTy<'tcx, Tag>> for OpTy<'tcx, Tag> {
    fn from(val: ImmTy<'tcx, Tag>) -> Self {
        OpTy { op: Operand::Immediate(val.imm), layout: val.layout }
impl<'tcx, Tag: Provenance> ImmTy<'tcx, Tag> {
    pub fn from_scalar(val: Scalar<Tag>, layout: TyAndLayout<'tcx>) -> Self {
        ImmTy { imm: val.into(), layout }
    pub fn from_immediate(imm: Immediate<Tag>, layout: TyAndLayout<'tcx>) -> Self {
        ImmTy { imm, layout }
    pub fn try_from_uint(i: impl Into<u128>, layout: TyAndLayout<'tcx>) -> Option<Self> {
        Some(Self::from_scalar(Scalar::try_from_uint(i, layout.size)?, layout))
    pub fn from_uint(i: impl Into<u128>, layout: TyAndLayout<'tcx>) -> Self {
        Self::from_scalar(Scalar::from_uint(i, layout.size), layout)
    pub fn try_from_int(i: impl Into<i128>, layout: TyAndLayout<'tcx>) -> Option<Self> {
        Some(Self::from_scalar(Scalar::try_from_int(i, layout.size)?, layout))
    pub fn from_int(i: impl Into<i128>, layout: TyAndLayout<'tcx>) -> Self {
        Self::from_scalar(Scalar::from_int(i, layout.size), layout)
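    // Sketch of the constructors above with hypothetical values: if `layout` describes `u8`,
    // `ImmTy::from_uint(42u8, layout)` succeeds, while `ImmTy::try_from_uint(300u16, layout)`
    // returns `None` because 300 does not fit into the layout's size.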
    pub fn to_const_int(self) -> ConstInt {
        assert!(self.layout.ty.is_integral());
        let int = self.to_scalar().expect("to_const_int doesn't work on scalar pairs").assert_int();
        ConstInt::new(int, self.layout.ty.is_signed(), self.layout.ty.is_ptr_sized_integral())
impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
    /// Try reading an immediate in memory; this is interesting particularly for `ScalarPair`.
    /// Returns `None` if the layout does not permit loading this as a value.
    fn try_read_immediate_from_mplace(
        mplace: &MPlaceTy<'tcx, M::PointerTag>,
    ) -> InterpResult<'tcx, Option<ImmTy<'tcx, M::PointerTag>>> {
        if mplace.layout.is_unsized() {
            // Don't touch unsized
        let alloc = match self.get_alloc(mplace)? {
                return Ok(Some(ImmTy {
                    imm: Scalar::ZST.into(),
                    layout: mplace.layout,
        match mplace.layout.abi {
                let scalar = alloc.read_scalar(alloc_range(Size::ZERO, mplace.layout.size))?;
                Ok(Some(ImmTy { imm: scalar.into(), layout: mplace.layout }))
            Abi::ScalarPair(a, b) => {
                // We checked `ptr_align` above, so all fields will have the alignment they need.
                // We would anyway check against `ptr_align.restrict_for_offset(b_offset)`,
                // which `ptr.offset(b_offset)` cannot possibly fail to satisfy.
                let (a, b) = (a.value, b.value);
                let (a_size, b_size) = (a.size(self), b.size(self));
                let b_offset = a_size.align_to(b.align(self).abi);
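                // Worked example: if `a_size` is 1 byte and `b` requires 4-byte alignment,
                // `b_offset` is 1 rounded up to 4, i.e. 4, so the second scalar is read at
                // byte offset 4.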
                assert!(b_offset.bytes() > 0); // we later use the offset to tell apart the fields
                let a_val = alloc.read_scalar(alloc_range(Size::ZERO, a_size))?;
                let b_val = alloc.read_scalar(alloc_range(b_offset, b_size))?;
                Ok(Some(ImmTy { imm: Immediate::ScalarPair(a_val, b_val), layout: mplace.layout }))
    /// Try returning an immediate for the operand.
    /// If the layout does not permit loading this as an immediate, return where in memory
    /// we can find the data.
    /// Note that for a given layout, this operation will either always fail or always
    /// succeed! Whether it succeeds depends on whether the layout can be represented
    /// in an `Immediate`, not on which data is stored there currently.
    pub fn try_read_immediate(
        src: &OpTy<'tcx, M::PointerTag>,
    ) -> InterpResult<'tcx, Result<ImmTy<'tcx, M::PointerTag>, MPlaceTy<'tcx, M::PointerTag>>> {
        Ok(match src.try_as_mplace() {
                if let Some(val) = self.try_read_immediate_from_mplace(mplace)? {
    /// Read an immediate from a place, asserting that that is possible with the given layout.
    pub fn read_immediate(
        op: &OpTy<'tcx, M::PointerTag>,
    ) -> InterpResult<'tcx, ImmTy<'tcx, M::PointerTag>> {
        if let Ok(imm) = self.try_read_immediate(op)? {
            span_bug!(self.cur_span(), "primitive read failed for type: {:?}", op.layout.ty);
    /// Read a scalar from a place
        op: &OpTy<'tcx, M::PointerTag>,
    ) -> InterpResult<'tcx, ScalarMaybeUninit<M::PointerTag>> {
        Ok(self.read_immediate(op)?.to_scalar_or_uninit())
    /// Read a pointer from a place.
        op: &OpTy<'tcx, M::PointerTag>,
    ) -> InterpResult<'tcx, Pointer<Option<M::PointerTag>>> {
        Ok(self.scalar_to_ptr(self.read_scalar(op)?.check_init()?))
    // Turn the wide MPlace into a string (must already be dereferenced!)
    pub fn read_str(&self, mplace: &MPlaceTy<'tcx, M::PointerTag>) -> InterpResult<'tcx, &str> {
        let len = mplace.len(self)?;
        let bytes = self.memory.read_bytes(mplace.ptr, Size::from_bytes(len))?;
        let str = std::str::from_utf8(bytes).map_err(|err| err_ub!(InvalidStr(err)))?;
    /// Projection functions
    pub fn operand_field(
        op: &OpTy<'tcx, M::PointerTag>,
    ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
        let base = match op.try_as_mplace() {
                // We can reuse the mplace field computation logic for indirect operands.
                let field = self.mplace_field(mplace, field)?;
                return Ok(field.into());
        let field_layout = op.layout.field(self, field);
        if field_layout.is_zst() {
            let immediate = Scalar::ZST.into();
            return Ok(OpTy { op: Operand::Immediate(immediate), layout: field_layout });
        let offset = op.layout.fields.offset(field);
        let immediate = match *base {
            // the field covers the entire type
            _ if offset.bytes() == 0 && field_layout.size == op.layout.size => *base,
            // extract fields from types with `ScalarPair` ABI
            Immediate::ScalarPair(a, b) => {
                let val = if offset.bytes() == 0 { a } else { b };
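                // E.g. when projecting to the overflow flag of a checked-add result `(u32, bool)`,
                // the flag's offset is non-zero, so it maps to the second component `b`.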
            Immediate::Scalar(val) => span_bug!(
                "field access on non aggregate {:#?}, {:#?}",
        Ok(OpTy { op: Operand::Immediate(immediate), layout: field_layout })
    pub fn operand_index(
        op: &OpTy<'tcx, M::PointerTag>,
    ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
        if let Ok(index) = usize::try_from(index) {
            // We can just treat this as a field.
            self.operand_field(op, index)
            // Indexing into a big array. This must be an mplace.
            let mplace = op.assert_mem_place();
            Ok(self.mplace_index(&mplace, index)?.into())
    pub fn operand_downcast(
        op: &OpTy<'tcx, M::PointerTag>,
    ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
        // Downcasts only change the layout
        Ok(match op.try_as_mplace() {
            Ok(ref mplace) => self.mplace_downcast(mplace, variant)?.into(),
                let layout = op.layout.for_variant(self, variant);
                OpTy { layout, ..*op }
    pub fn operand_projection(
        base: &OpTy<'tcx, M::PointerTag>,
        proj_elem: mir::PlaceElem<'tcx>,
    ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
        use rustc_middle::mir::ProjectionElem::*;
            Field(field, _) => self.operand_field(base, field.index())?,
            Downcast(_, variant) => self.operand_downcast(base, variant)?,
            Deref => self.deref_operand(base)?.into(),
            Subslice { .. } | ConstantIndex { .. } | Index(_) => {
                // The rest should only occur as mplace, we do not use Immediates for types
                // allowing such operations. This matches place_projection forcing an allocation.
                let mplace = base.assert_mem_place();
                self.mplace_projection(&mplace, proj_elem)?.into()
    /// Converts a repr(simd) operand into an operand where `place_index` accesses the SIMD elements.
    /// Also returns the number of elements.
    pub fn operand_to_simd(
        base: &OpTy<'tcx, M::PointerTag>,
    ) -> InterpResult<'tcx, (MPlaceTy<'tcx, M::PointerTag>, u64)> {
        // Basically we just transmute this place into an array following simd_size_and_type.
        // This only works in memory, but repr(simd) types should never be immediates anyway.
        assert!(base.layout.ty.is_simd());
        self.mplace_to_simd(&base.assert_mem_place())
    /// Read from a local. Will not actually access the local if reading from a ZST.
    /// Will not access memory; instead, an indirect `Operand` is returned.
    /// This is public because it is used by [priroda](https://github.com/oli-obk/priroda) to get an
    /// OpTy from a local
        frame: &super::Frame<'mir, 'tcx, M::PointerTag, M::FrameExtra>,
        layout: Option<TyAndLayout<'tcx>>,
    ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
        let layout = self.layout_of_local(frame, local, layout)?;
        let op = if layout.is_zst() {
            // Do not read from ZST, they might not be initialized
            Operand::Immediate(Scalar::ZST.into())
            M::access_local(&self, frame, local)?
        Ok(OpTy { op, layout })
    /// Every place can be read from, so we can turn them into an operand.
    /// This will definitely return `Indirect` if the place is a `Ptr`, i.e., this
    /// will never actually read from memory.
        place: &PlaceTy<'tcx, M::PointerTag>,
    ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
        let op = match **place {
            Place::Ptr(mplace) => Operand::Indirect(mplace),
            Place::Local { frame, local } => {
                *self.access_local(&self.stack()[frame], local, None)?
        Ok(OpTy { op, layout: place.layout })
    // Evaluate a place with the goal of reading from it. This lets us sometimes
    // avoid allocations.
    pub fn eval_place_to_op(
        place: mir::Place<'tcx>,
        layout: Option<TyAndLayout<'tcx>>,
    ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
        // Do not use the layout passed in as argument if the base we are looking at
        // here is not the entire place.
        let layout = if place.projection.is_empty() { layout } else { None };
        let base_op = self.access_local(self.frame(), place.local, layout)?;
            .try_fold(base_op, |op, elem| self.operand_projection(&op, elem))?;
        trace!("eval_place_to_op: got {:?}", *op);
        // Sanity-check the type we ended up with.
        debug_assert!(mir_assign_valid_types(
            self.layout_of(self.subst_from_current_frame_and_normalize_erasing_regions(
                place.ty(&self.frame().body.local_decls, *self.tcx).ty
    /// Evaluate the operand, returning a place where you can then find the data.
    /// If you already know the layout, you can save two table lookups
    /// by passing it in here.
        mir_op: &mir::Operand<'tcx>,
        layout: Option<TyAndLayout<'tcx>>,
    ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
        use rustc_middle::mir::Operand::*;
        let op = match *mir_op {
            // FIXME: do some more logic on `move` to invalidate the old location
            Copy(place) | Move(place) => self.eval_place_to_op(place, layout)?,
            Constant(ref constant) => {
                    self.subst_from_current_frame_and_normalize_erasing_regions(constant.literal);
                // This can still fail:
                // * During ConstProp, with `TooGeneric` or since the `required_consts` were not all
                //   successfully evaluated.
                // * During CTFE, since promoteds in `const`/`static` initializer bodies can fail.
                self.mir_const_to_op(&val, layout)?
        trace!("{:?}: {:?}", mir_op, *op);
    /// Evaluate a bunch of operands at once
    pub(super) fn eval_operands(
        ops: &[mir::Operand<'tcx>],
    ) -> InterpResult<'tcx, Vec<OpTy<'tcx, M::PointerTag>>> {
        ops.iter().map(|op| self.eval_operand(op, None)).collect()
    // Used when the miri-engine runs into a constant and for extracting information from constants
    // in patterns via the `const_eval` module
    /// The `val` and `layout` are assumed to already be in our interpreter
    /// "universe" (param_env).
        val: &ty::Const<'tcx>,
        layout: Option<TyAndLayout<'tcx>>,
    ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
            ty::ConstKind::Param(_) | ty::ConstKind::Bound(..) => throw_inval!(TooGeneric),
            ty::ConstKind::Error(_) => throw_inval!(AlreadyReported(ErrorReported)),
            ty::ConstKind::Unevaluated(uv) => {
                let instance = self.resolve(uv.def, uv.substs(*self.tcx))?;
                Ok(self.eval_to_allocation(GlobalId { instance, promoted: uv.promoted })?.into())
            ty::ConstKind::Infer(..) | ty::ConstKind::Placeholder(..) => {
                span_bug!(self.cur_span(), "const_to_op: Unexpected ConstKind {:?}", val)
            ty::ConstKind::Value(val_val) => self.const_val_to_op(val_val, val.ty, layout),
    pub fn mir_const_to_op(
        val: &mir::ConstantKind<'tcx>,
        layout: Option<TyAndLayout<'tcx>>,
    ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
            mir::ConstantKind::Ty(ct) => self.const_to_op(ct, layout),
            mir::ConstantKind::Val(val, ty) => self.const_val_to_op(*val, ty, layout),
    crate fn const_val_to_op(
        val_val: ConstValue<'tcx>,
        layout: Option<TyAndLayout<'tcx>>,
    ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
        // Other cases need layout.
        let tag_scalar = |scalar| -> InterpResult<'tcx, _> {
                Scalar::Ptr(ptr, size) => Scalar::Ptr(self.global_base_pointer(ptr)?, size),
                Scalar::Int(int) => Scalar::Int(int),
        let layout = from_known_layout(self.tcx, self.param_env, layout, || self.layout_of(ty))?;
        let op = match val_val {
            ConstValue::ByRef { alloc, offset } => {
                let id = self.tcx.create_memory_alloc(alloc);
                // We rely on mutability being set correctly in that allocation to prevent writes
                // where none should happen.
                let ptr = self.global_base_pointer(Pointer::new(id, offset))?;
                Operand::Indirect(MemPlace::from_ptr(ptr.into(), layout.align.abi))
            ConstValue::Scalar(x) => Operand::Immediate(tag_scalar(x)?.into()),
            ConstValue::Slice { data, start, end } => {
                // We rely on mutability being set correctly in `data` to prevent writes
                // where none should happen.
                let ptr = Pointer::new(
                    self.tcx.create_memory_alloc(data),
                    Size::from_bytes(start), // offset: `start`
                Operand::Immediate(Immediate::new_slice(
                    Scalar::from_pointer(self.global_base_pointer(ptr)?, &*self.tcx),
                    u64::try_from(end.checked_sub(start).unwrap()).unwrap(), // len: `end - start`
        Ok(OpTy { op, layout })
    /// Read discriminant, return the runtime value as well as the variant index.
    /// Can also legally be called on non-enums (e.g. through the discriminant_value intrinsic)!
    pub fn read_discriminant(
        op: &OpTy<'tcx, M::PointerTag>,
    ) -> InterpResult<'tcx, (Scalar<M::PointerTag>, VariantIdx)> {
        trace!("read_discriminant_value {:#?}", op.layout);
        // Get type and layout of the discriminant.
        let discr_layout = self.layout_of(op.layout.ty.discriminant_ty(*self.tcx))?;
        trace!("discriminant type: {:?}", discr_layout.ty);
        // We use "discriminant" to refer to the value associated with a particular enum variant.
        // This is not to be confused with its "variant index", which just determines its position in the
        // declared list of variants -- they can differ with explicitly assigned discriminants.
        // We use "tag" to refer to how the discriminant is encoded in memory, which can be either
        // straight-forward (`TagEncoding::Direct`) or with a niche (`TagEncoding::Niche`).
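        // For example, in `enum E { A = 10, B = 20 }` variant `B` has variant index 1 but
        // discriminant 20; with `TagEncoding::Direct` the value 20 is what is stored as the tag.
        // With `TagEncoding::Niche` (e.g. `Option<&T>`), the tag lives in otherwise-invalid values
        // of a field (here the null pointer), and the dataful variant stores no tag at all.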
        let (tag_scalar_layout, tag_encoding, tag_field) = match op.layout.variants {
            Variants::Single { index } => {
                let discr = match op.layout.ty.discriminant_for_variant(*self.tcx, index) {
                        // This type actually has discriminants.
                        assert_eq!(discr.ty, discr_layout.ty);
                        Scalar::from_uint(discr.val, discr_layout.size)
                        // On a type without actual discriminants, variant is 0.
                        assert_eq!(index.as_u32(), 0);
                        Scalar::from_uint(index.as_u32(), discr_layout.size)
                return Ok((discr, index));
            Variants::Multiple { tag, ref tag_encoding, tag_field, .. } => {
                (tag, tag_encoding, tag_field)
        // There are *three* layouts that come into play here:
        // - The discriminant has a type for typechecking. This is `discr_layout`, and is used for
        //   the `Scalar` we return.
        // - The tag (encoded discriminant) has layout `tag_layout`. This is always an integer type,
        //   and used to interpret the value we read from the tag field.
        //   For the return value, a cast to `discr_layout` is performed.
        // - The field storing the tag has a layout, which is very similar to `tag_layout` but
        //   may be a pointer. This is `tag_val.layout`; we just use it for sanity checks.
        // Get layout for tag.
        let tag_layout = self.layout_of(tag_scalar_layout.value.to_int_ty(*self.tcx))?;
        // Read tag and sanity-check `tag_layout`.
        let tag_val = self.read_immediate(&self.operand_field(op, tag_field)?)?;
        assert_eq!(tag_layout.size, tag_val.layout.size);
        assert_eq!(tag_layout.abi.is_signed(), tag_val.layout.abi.is_signed());
        let tag_val = tag_val.to_scalar()?;
        trace!("tag value: {:?}", tag_val);
        // Figure out which discriminant and variant this corresponds to.
        Ok(match *tag_encoding {
            TagEncoding::Direct => {
                let tag_bits = tag_val
                    .map_err(|dbg_val| err_ub!(InvalidTag(dbg_val)))?
                    .assert_bits(tag_layout.size);
                // Cast bits from tag layout to discriminant layout.
                let discr_val = self.cast_from_scalar(tag_bits, tag_layout, discr_layout.ty);
                let discr_bits = discr_val.assert_bits(discr_layout.size);
                // Convert discriminant to variant index, and catch invalid discriminants.
                let index = match *op.layout.ty.kind() {
                        adt.discriminants(*self.tcx).find(|(_, var)| var.val == discr_bits)
                    ty::Generator(def_id, substs, _) => {
                        let substs = substs.as_generator();
                            .discriminants(def_id, *self.tcx)
                            .find(|(_, var)| var.val == discr_bits)
                    _ => span_bug!(self.cur_span(), "tagged layout for non-adt non-generator"),
                .ok_or_else(|| err_ub!(InvalidTag(Scalar::from_uint(tag_bits, tag_layout.size))))?;
                // Return the cast value, and the index.
            TagEncoding::Niche { dataful_variant, ref niche_variants, niche_start } => {
                // Compute the variant this niche value/"tag" corresponds to. With niche layout,
                // discriminant (encoded in niche/tag) and variant index are the same.
                let variants_start = niche_variants.start().as_u32();
                let variants_end = niche_variants.end().as_u32();
                let variant = match tag_val.try_to_int() {
                        // So this is a pointer then, and casting to an int failed.
                        // Can only happen during CTFE.
                        let ptr = self.scalar_to_ptr(tag_val);
                        // The niche must be just 0, and the ptr must be non-null; then we know
                        // this is okay. Everything else we conservatively reject.
                        let ptr_valid = niche_start == 0
                            && variants_start == variants_end
                            && !self.memory.ptr_may_be_null(ptr);
                            throw_ub!(InvalidTag(dbg_val))
                        let tag_bits = tag_bits.assert_bits(tag_layout.size);
                        // We need to use machine arithmetic to get the relative variant idx:
                        // variant_index_relative = tag_val - niche_start_val
                        let tag_val = ImmTy::from_uint(tag_bits, tag_layout);
                        let niche_start_val = ImmTy::from_uint(niche_start, tag_layout);
                        let variant_index_relative_val =
                            self.binary_op(mir::BinOp::Sub, &tag_val, &niche_start_val)?;
                        let variant_index_relative = variant_index_relative_val
                            .assert_bits(tag_val.layout.size);
                        // Check if this is in the range that indicates an actual discriminant.
                        if variant_index_relative <= u128::from(variants_end - variants_start) {
                            let variant_index_relative = u32::try_from(variant_index_relative)
                                .expect("we checked that this fits into a u32");
                            // Then computing the absolute variant idx should not overflow any more.
                            let variant_index = variants_start
                                .checked_add(variant_index_relative)
                                .expect("overflow computing absolute variant idx");
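                            // Worked example: with `niche_variants = 1..=2` and `niche_start = 5`,
                            // a tag value of 6 gives `variant_index_relative = 6 - 5 = 1`, which is
                            // in range, so `variant_index = 1 + 1 = 2`.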
                            let variants_len = op
                                .expect("tagged layout for non adt")
                            assert!(usize::try_from(variant_index).unwrap() < variants_len);
                            VariantIdx::from_u32(variant_index)
                // Compute the size of the scalar we need to return.
                // No need to cast, because the variant index directly serves as discriminant and is
                // encoded in the tag.
                (Scalar::from_uint(variant.as_u32(), discr_layout.size), variant)