1 //! Functions concerning immediate values and operands, and reading from operands.
2 //! All high-level functions to read from memory work on operands as sources.
4 use std::convert::TryFrom;
7 use rustc_hir::def::Namespace;
8 use rustc_macros::HashStable;
9 use rustc_middle::ty::layout::{LayoutOf, PrimitiveExt, TyAndLayout};
10 use rustc_middle::ty::print::{FmtPrinter, PrettyPrinter, Printer};
11 use rustc_middle::ty::{ConstInt, DelaySpanBugEmitted, Ty};
12 use rustc_middle::{mir, ty};
13 use rustc_target::abi::{self, Abi, Align, HasDataLayout, Size, TagEncoding};
14 use rustc_target::abi::{VariantIdx, Variants};
17 alloc_range, from_known_layout, mir_assign_valid_types, AllocId, ConstValue, Frame, GlobalId,
18 InterpCx, InterpResult, MPlaceTy, Machine, MemPlace, Place, PlaceTy, Pointer,
19 PointerArithmetic, Provenance, Scalar, ScalarMaybeUninit,
22 /// An `Immediate` represents a single immediate self-contained Rust value.
24 /// For optimization of a few very common cases, there is also a representation for a pair of
25 /// primitive values (`ScalarPair`). It allows Miri to avoid making allocations for checked binary
26 /// operations and wide pointers. This idea was taken from rustc's codegen.
27 /// In particular, thanks to `ScalarPair`, arithmetic operations and casts can be entirely
28 /// defined on `Immediate`, and do not have to work with a `Place`.
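/// For illustration (an illustrative sketch, not an exhaustive list of cases), all of the
/// following source-level values fit into an `Immediate` without touching memory:
/// ```
/// let x: u32 = 42;                   // single `Scalar`
/// let s: &[u8] = b"hi";              // wide pointer: `ScalarPair(data_ptr, len)`
/// let r = 250u8.overflowing_add(10); // checked binop result: `ScalarPair(value, overflow_flag)`
/// ```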
29 #[derive(Copy, Clone, PartialEq, Eq, HashStable, Hash, Debug)]
30 pub enum Immediate<Tag: Provenance = AllocId> {
31 /// A single scalar value (must have *initialized* `Scalar` ABI).
32 /// FIXME: we also currently often use this for ZST.
33 /// `ScalarMaybeUninit` should reject ZST, and we should use `Uninit` for them instead.
34 Scalar(ScalarMaybeUninit<Tag>),
35 /// A pair of two scalar values (must have `ScalarPair` ABI where both fields are
36 /// `Scalar::Initialized`).
37 ScalarPair(ScalarMaybeUninit<Tag>, ScalarMaybeUninit<Tag>),
38 /// A value of fully uninitialized memory. Can have arbitrary size and layout.
42 #[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
43 rustc_data_structures::static_assert_size!(Immediate, 56);
45 impl<Tag: Provenance> From<ScalarMaybeUninit<Tag>> for Immediate<Tag> {
47 fn from(val: ScalarMaybeUninit<Tag>) -> Self {
48 Immediate::Scalar(val)
52 impl<Tag: Provenance> From<Scalar<Tag>> for Immediate<Tag> {
54 fn from(val: Scalar<Tag>) -> Self {
55 Immediate::Scalar(val.into())
59 impl<'tcx, Tag: Provenance> Immediate<Tag> {
60 pub fn from_pointer(p: Pointer<Tag>, cx: &impl HasDataLayout) -> Self {
61 Immediate::Scalar(ScalarMaybeUninit::from_pointer(p, cx))
64 pub fn from_maybe_pointer(p: Pointer<Option<Tag>>, cx: &impl HasDataLayout) -> Self {
65 Immediate::Scalar(ScalarMaybeUninit::from_maybe_pointer(p, cx))
68 pub fn new_slice(val: Scalar<Tag>, len: u64, cx: &impl HasDataLayout) -> Self {
69 Immediate::ScalarPair(val.into(), Scalar::from_machine_usize(len, cx).into())
74 vtable: Pointer<Option<Tag>>,
75 cx: &impl HasDataLayout,
77 Immediate::ScalarPair(val.into(), ScalarMaybeUninit::from_maybe_pointer(vtable, cx))
81 pub fn to_scalar_or_uninit(self) -> ScalarMaybeUninit<Tag> {
83 Immediate::Scalar(val) => val,
84 Immediate::ScalarPair(..) => bug!("Got a scalar pair where a scalar was expected"),
85 Immediate::Uninit => ScalarMaybeUninit::Uninit,
90 pub fn to_scalar(self) -> InterpResult<'tcx, Scalar<Tag>> {
91 self.to_scalar_or_uninit().check_init()
95 pub fn to_scalar_or_uninit_pair(self) -> (ScalarMaybeUninit<Tag>, ScalarMaybeUninit<Tag>) {
97 Immediate::ScalarPair(val1, val2) => (val1, val2),
98 Immediate::Scalar(..) => bug!("Got a scalar where a scalar pair was expected"),
99 Immediate::Uninit => (ScalarMaybeUninit::Uninit, ScalarMaybeUninit::Uninit),
104 pub fn to_scalar_pair(self) -> InterpResult<'tcx, (Scalar<Tag>, Scalar<Tag>)> {
105 let (val1, val2) = self.to_scalar_or_uninit_pair();
106 Ok((val1.check_init()?, val2.check_init()?))
110 // ScalarPair needs a type to interpret, so we often have an immediate and a type together
111 // as input for binary and cast operations.
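// For example (an illustrative sketch): the raw bits 0x2a are ambiguous on their own; paired
// with a layout in an `ImmTy` they are known to mean `42_u8`, `42_i64`, or `'*'`, which is
// exactly the information binary operations and casts need.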
112 #[derive(Copy, Clone, Debug)]
113 pub struct ImmTy<'tcx, Tag: Provenance = AllocId> {
115 pub layout: TyAndLayout<'tcx>,
118 #[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
119 rustc_data_structures::static_assert_size!(ImmTy<'_>, 72);
121 impl<Tag: Provenance> std::fmt::Display for ImmTy<'_, Tag> {
122 fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
123 /// Helper function for printing a scalar to a FmtPrinter
124 fn p<'a, 'tcx, Tag: Provenance>(
125 cx: FmtPrinter<'a, 'tcx>,
126 s: ScalarMaybeUninit<Tag>,
128 ) -> Result<FmtPrinter<'a, 'tcx>, std::fmt::Error> {
130 ScalarMaybeUninit::Scalar(Scalar::Int(int)) => {
131 cx.pretty_print_const_scalar_int(int, ty, true)
133 ScalarMaybeUninit::Scalar(Scalar::Ptr(ptr, _sz)) => {
134 // Just print the ptr value. `pretty_print_const_scalar_ptr` would also try to
135 // print what it points to, which would fail since it has no access to the local memory.
137 cx.pretty_print_const_pointer(ptr, ty, true)
139 ScalarMaybeUninit::Uninit => cx.typed_value(
141 this.write_str("uninit ")?;
144 |this| this.print_type(ty),
149 ty::tls::with(|tcx| {
151 Immediate::Scalar(s) => {
152 if let Some(ty) = tcx.lift(self.layout.ty) {
153 let cx = FmtPrinter::new(tcx, Namespace::ValueNS);
154 f.write_str(&p(cx, s, ty)?.into_buffer())?;
157 write!(f, "{:x}: {}", s, self.layout.ty)
159 Immediate::ScalarPair(a, b) => {
160 // FIXME(oli-obk): at least print tuples and slices nicely
161 write!(f, "({:x}, {:x}): {}", a, b, self.layout.ty)
163 Immediate::Uninit => {
164 write!(f, "uninit: {}", self.layout.ty)
171 impl<'tcx, Tag: Provenance> std::ops::Deref for ImmTy<'tcx, Tag> {
172 type Target = Immediate<Tag>;
174 fn deref(&self) -> &Immediate<Tag> {
179 /// An `Operand` is the result of computing a `mir::Operand`. It can be immediate,
180 /// or still in memory. The latter is an optimization, to delay reading that chunk of
181 /// memory and to avoid having to store arbitrary-sized data here.
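/// For example (an illustrative sketch): reading a `u32` local can produce
/// `Operand::Immediate`, while a large `[u8; 1024]` is left as `Operand::Indirect`,
/// i.e. a `MemPlace` pointing at the data in memory.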
182 #[derive(Copy, Clone, PartialEq, Eq, HashStable, Hash, Debug)]
183 pub enum Operand<Tag: Provenance = AllocId> {
184 Immediate(Immediate<Tag>),
185 Indirect(MemPlace<Tag>),
188 #[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
189 pub struct OpTy<'tcx, Tag: Provenance = AllocId> {
190 op: Operand<Tag>, // Keep this private; it helps enforce invariants.
191 pub layout: TyAndLayout<'tcx>,
192 /// rustc does not have a proper way to represent the type of a field of a `repr(packed)` struct:
193 /// it needs to have a different alignment than the field type would usually have.
194 /// So we represent this here with a separate field that "overwrites" `layout.align`.
195 /// This means `layout.align` should never be used for an `OpTy`!
196 /// `None` means "alignment does not matter since this is a by-value operand"
197 /// (`Operand::Immediate`); this field is only relevant for `Operand::Indirect`.
198 /// Also CTFE ignores alignment anyway, so this is for Miri only.
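/// For example (illustrative only): for `#[repr(packed)] struct P { a: u8, b: u64 }`, the
/// field `b` sits at offset 1 and is therefore only 1-aligned, even though the layout of
/// `u64` says "align 8" -- this field records exactly that restricted alignment.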
199 pub align: Option<Align>,
202 #[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
203 rustc_data_structures::static_assert_size!(OpTy<'_>, 88);
205 impl<'tcx, Tag: Provenance> std::ops::Deref for OpTy<'tcx, Tag> {
206 type Target = Operand<Tag>;
208 fn deref(&self) -> &Operand<Tag> {
213 impl<'tcx, Tag: Provenance> From<MPlaceTy<'tcx, Tag>> for OpTy<'tcx, Tag> {
215 fn from(mplace: MPlaceTy<'tcx, Tag>) -> Self {
216 OpTy { op: Operand::Indirect(*mplace), layout: mplace.layout, align: Some(mplace.align) }
220 impl<'tcx, Tag: Provenance> From<&'_ MPlaceTy<'tcx, Tag>> for OpTy<'tcx, Tag> {
222 fn from(mplace: &MPlaceTy<'tcx, Tag>) -> Self {
223 OpTy { op: Operand::Indirect(**mplace), layout: mplace.layout, align: Some(mplace.align) }
227 impl<'tcx, Tag: Provenance> From<&'_ mut MPlaceTy<'tcx, Tag>> for OpTy<'tcx, Tag> {
229 fn from(mplace: &mut MPlaceTy<'tcx, Tag>) -> Self {
230 OpTy { op: Operand::Indirect(**mplace), layout: mplace.layout, align: Some(mplace.align) }
234 impl<'tcx, Tag: Provenance> From<ImmTy<'tcx, Tag>> for OpTy<'tcx, Tag> {
236 fn from(val: ImmTy<'tcx, Tag>) -> Self {
237 OpTy { op: Operand::Immediate(val.imm), layout: val.layout, align: None }
241 impl<'tcx, Tag: Provenance> ImmTy<'tcx, Tag> {
243 pub fn from_scalar(val: Scalar<Tag>, layout: TyAndLayout<'tcx>) -> Self {
244 ImmTy { imm: val.into(), layout }
248 pub fn from_immediate(imm: Immediate<Tag>, layout: TyAndLayout<'tcx>) -> Self {
249 ImmTy { imm, layout }
253 pub fn try_from_uint(i: impl Into<u128>, layout: TyAndLayout<'tcx>) -> Option<Self> {
254 Some(Self::from_scalar(Scalar::try_from_uint(i, layout.size)?, layout))
257 pub fn from_uint(i: impl Into<u128>, layout: TyAndLayout<'tcx>) -> Self {
258 Self::from_scalar(Scalar::from_uint(i, layout.size), layout)
262 pub fn try_from_int(i: impl Into<i128>, layout: TyAndLayout<'tcx>) -> Option<Self> {
263 Some(Self::from_scalar(Scalar::try_from_int(i, layout.size)?, layout))
267 pub fn from_int(i: impl Into<i128>, layout: TyAndLayout<'tcx>) -> Self {
268 Self::from_scalar(Scalar::from_int(i, layout.size), layout)
272 pub fn to_const_int(self) -> ConstInt {
273 assert!(self.layout.ty.is_integral());
274 let int = self.to_scalar().expect("to_const_int doesn't work on scalar pairs").assert_int();
275 ConstInt::new(int, self.layout.ty.is_signed(), self.layout.ty.is_ptr_sized_integral())
279 impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
280 /// Try reading an immediate in memory; this is particularly interesting for `ScalarPair`.
281 /// Returns `None` if the layout does not permit loading this as a value.
283 /// This is an internal function; call `read_immediate` instead.
284 fn read_immediate_from_mplace_raw(
286 mplace: &MPlaceTy<'tcx, M::PointerTag>,
288 ) -> InterpResult<'tcx, Option<ImmTy<'tcx, M::PointerTag>>> {
289 if mplace.layout.is_unsized() {
290 // Don't touch unsized
294 let Some(alloc) = self.get_place_alloc(mplace)? else {
295 return Ok(Some(ImmTy {
297 imm: Scalar::ZST.into(),
298 layout: mplace.layout,
302 // It may seem like all types with `Scalar` or `ScalarPair` ABI are fair game at this point.
303 // However, `MaybeUninit<u64>` is considered a `Scalar` as far as its layout is concerned --
304 // and yet cannot be represented by an interpreter `Scalar`, since we have to handle the
305 // case where some of the bytes are initialized and others are not. So, we need an extra
306 // check that walks over the type of `mplace` to make sure it is truly correct to treat this
307 // like a `Scalar` (or `ScalarPair`).
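// An illustrative source-level example of the problematic case (not part of the interpreter):
//     let mut x = MaybeUninit::<u64>::uninit();
//     unsafe { x.as_mut_ptr().cast::<u8>().write(1) }; // byte 0 is now init, bytes 1..8 are not
// Such a value has `Scalar` ABI but no faithful `Scalar` representation.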
308 let scalar_layout = match mplace.layout.abi {
309 // `if` does not work nested inside patterns, making this a bit awkward to express.
310 Abi::Scalar(abi::Scalar::Initialized { value: s, .. }) => Some(s),
311 Abi::Scalar(s) if force => Some(s.primitive()),
314 let read_provenance = |s: abi::Primitive, size| {
315 // Should be just `s.is_ptr()`, but we support a Miri flag that accepts more
316 // questionable ptr-int transmutes.
317 let number_may_have_provenance = !M::enforce_number_no_provenance(self);
318 s.is_ptr() || (number_may_have_provenance && size == self.pointer_size())
320 if let Some(s) = scalar_layout {
321 let size = s.size(self);
322 assert_eq!(size, mplace.layout.size, "abi::Scalar size does not match layout size");
324 alloc.read_scalar(alloc_range(Size::ZERO, size), read_provenance(s, size))?;
325 return Ok(Some(ImmTy { imm: scalar.into(), layout: mplace.layout }));
327 let scalar_pair_layout = match mplace.layout.abi {
329 abi::Scalar::Initialized { value: a, .. },
330 abi::Scalar::Initialized { value: b, .. },
332 Abi::ScalarPair(a, b) if force => Some((a.primitive(), b.primitive())),
335 if let Some((a, b)) = scalar_pair_layout {
336 // We checked `ptr_align` above, so all fields will have the alignment they need.
337 // We would anyway check against `ptr_align.restrict_for_offset(b_offset)`,
338 // which `ptr.offset(b_offset)` cannot possibly fail to satisfy.
339 let (a_size, b_size) = (a.size(self), b.size(self));
340 let b_offset = a_size.align_to(b.align(self).abi);
341 assert!(b_offset.bytes() > 0); // in `operand_field` we use the offset to tell apart the fields
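// Worked example (illustrative, assuming a 64-bit target): for a wide pointer such as `&[u8]`
// with `ScalarPair(data_ptr, len)`, a_size is 8 bytes and `b` is 8-aligned, so
// b_offset = 8.align_to(8) = 8 -- the same field placement codegen uses.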
343 alloc.read_scalar(alloc_range(Size::ZERO, a_size), read_provenance(a, a_size))?;
345 alloc.read_scalar(alloc_range(b_offset, b_size), read_provenance(b, b_size))?;
346 return Ok(Some(ImmTy {
347 imm: Immediate::ScalarPair(a_val, b_val),
348 layout: mplace.layout,
351 // Neither a scalar nor scalar pair.
355 /// Try returning an immediate for the operand. If the layout does not permit loading this as an
356 /// immediate, return where in memory we can find the data.
357 /// Note that for a given layout, this operation will either always fail or always
358 /// succeed! Whether it succeeds depends on whether the layout can be represented
359 /// in an `Immediate`, not on which data is stored there currently.
361 /// If `force` is `true`, then even scalars with fields that can be uninit will be
362 /// read. This means the load is lossy and should not be written back!
363 /// This flag exists only for validity checking.
365 /// This is an internal function that should not usually be used; call `read_immediate` instead.
366 pub fn read_immediate_raw(
368 src: &OpTy<'tcx, M::PointerTag>,
370 ) -> InterpResult<'tcx, Result<ImmTy<'tcx, M::PointerTag>, MPlaceTy<'tcx, M::PointerTag>>> {
371 Ok(match src.try_as_mplace() {
373 if let Some(val) = self.read_immediate_from_mplace_raw(mplace, force)? {
383 /// Read an immediate from a place, asserting that that is possible with the given layout.
385 pub fn read_immediate(
387 op: &OpTy<'tcx, M::PointerTag>,
388 ) -> InterpResult<'tcx, ImmTy<'tcx, M::PointerTag>> {
389 if let Ok(imm) = self.read_immediate_raw(op, /*force*/ false)? {
392 span_bug!(self.cur_span(), "primitive read failed for type: {:?}", op.layout.ty);
396 /// Read a scalar from a place
399 op: &OpTy<'tcx, M::PointerTag>,
400 ) -> InterpResult<'tcx, ScalarMaybeUninit<M::PointerTag>> {
401 Ok(self.read_immediate(op)?.to_scalar_or_uninit())
404 /// Read a pointer from a place.
407 op: &OpTy<'tcx, M::PointerTag>,
408 ) -> InterpResult<'tcx, Pointer<Option<M::PointerTag>>> {
409 self.scalar_to_ptr(self.read_scalar(op)?.check_init()?)
412 /// Turn the wide MPlace into a string (must already be dereferenced!)
413 pub fn read_str(&self, mplace: &MPlaceTy<'tcx, M::PointerTag>) -> InterpResult<'tcx, &str> {
414 let len = mplace.len(self)?;
415 let bytes = self.read_bytes_ptr(mplace.ptr, Size::from_bytes(len))?;
416 let str = std::str::from_utf8(bytes).map_err(|err| err_ub!(InvalidStr(err)))?;
420 /// Projection functions
421 pub fn operand_field(
423 op: &OpTy<'tcx, M::PointerTag>,
425 ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
426 let base = match op.try_as_mplace() {
428 // We can reuse the mplace field computation logic for indirect operands.
429 let field = self.mplace_field(mplace, field)?;
430 return Ok(field.into());
435 let field_layout = base.layout.field(self, field);
436 let offset = base.layout.fields.offset(field);
437 // This makes several assumptions about what layouts we will encounter; we match what
438 // codegen does as well as we can (see `extract_field` in `rustc_codegen_ssa/src/mir/operand.rs`).
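// For example (an illustrative sketch): projecting to field 1 of a `(i32, bool)` held as
// `Immediate::ScalarPair` simply returns the `bool` scalar (conceptually at offset 4),
// without ever spilling the pair to memory.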
439 let field_val: Immediate<_> = match (*base, base.layout.abi) {
440 // the field contains no information
441 _ if field_layout.is_zst() => Scalar::ZST.into(),
442 // the field covers the entire type
443 _ if field_layout.size == base.layout.size => {
444 assert!(match (base.layout.abi, field_layout.abi) {
445 (Abi::Scalar(..), Abi::Scalar(..)) => true,
446 (Abi::ScalarPair(..), Abi::ScalarPair(..)) => true,
449 assert!(offset.bytes() == 0);
452 // extract fields from types with `ScalarPair` ABI
453 (Immediate::ScalarPair(a_val, b_val), Abi::ScalarPair(a, b)) => {
454 assert!(matches!(field_layout.abi, Abi::Scalar(..)));
455 Immediate::from(if offset.bytes() == 0 {
456 debug_assert_eq!(field_layout.size, a.size(self));
459 debug_assert_eq!(offset, a.size(self).align_to(b.align(self).abi));
460 debug_assert_eq!(field_layout.size, b.size(self));
466 "invalid field access on immediate {}, layout {:#?}",
472 Ok(OpTy { op: Operand::Immediate(field_val), layout: field_layout, align: None })
475 pub fn operand_index(
477 op: &OpTy<'tcx, M::PointerTag>,
479 ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
480 if let Ok(index) = usize::try_from(index) {
481 // We can just treat this as a field.
482 self.operand_field(op, index)
484 // Indexing into a big array. This must be an mplace.
485 let mplace = op.assert_mem_place();
486 Ok(self.mplace_index(&mplace, index)?.into())
490 pub fn operand_downcast(
492 op: &OpTy<'tcx, M::PointerTag>,
494 ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
495 Ok(match op.try_as_mplace() {
496 Ok(ref mplace) => self.mplace_downcast(mplace, variant)?.into(),
498 // Downcasts only change the layout.
499 // (In particular, no check about whether this is even the active variant -- that's by design,
500 // see https://github.com/rust-lang/rust/issues/93688#issuecomment-1032929496.)
501 let layout = op.layout.for_variant(self, variant);
502 OpTy { layout, ..*op }
507 #[instrument(skip(self), level = "debug")]
508 pub fn operand_projection(
510 base: &OpTy<'tcx, M::PointerTag>,
511 proj_elem: mir::PlaceElem<'tcx>,
512 ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
513 use rustc_middle::mir::ProjectionElem::*;
515 Field(field, _) => self.operand_field(base, field.index())?,
516 Downcast(_, variant) => self.operand_downcast(base, variant)?,
517 Deref => self.deref_operand(base)?.into(),
518 Subslice { .. } | ConstantIndex { .. } | Index(_) => {
519 // The rest should only occur as mplace, we do not use Immediates for types
520 // allowing such operations. This matches place_projection forcing an allocation.
521 let mplace = base.assert_mem_place();
522 self.mplace_projection(&mplace, proj_elem)?.into()
527 /// Converts a repr(simd) operand into an operand where `place_index` accesses the SIMD elements.
528 /// Also returns the number of elements.
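/// For example (a sketch; the concrete type is only for illustration): an operand of a
/// `#[repr(simd)]` vector with four `u32` lanes is reinterpreted as a place of type
/// `[u32; 4]`, and the returned element count is 4.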
529 pub fn operand_to_simd(
531 base: &OpTy<'tcx, M::PointerTag>,
532 ) -> InterpResult<'tcx, (MPlaceTy<'tcx, M::PointerTag>, u64)> {
533 // Basically we just transmute this place into an array following simd_size_and_type.
534 // This only works in memory, but repr(simd) types should never be immediates anyway.
535 assert!(base.layout.ty.is_simd());
536 self.mplace_to_simd(&base.assert_mem_place())
539 /// Read from a local. Will not actually access the local if reading from a ZST.
540 /// Will not access memory; instead an indirect `Operand` is returned.
542 /// This is public because it is used by [priroda](https://github.com/oli-obk/priroda) to get an
543 /// OpTy from a local.
546 frame: &Frame<'mir, 'tcx, M::PointerTag, M::FrameExtra>,
548 layout: Option<TyAndLayout<'tcx>>,
549 ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
550 let layout = self.layout_of_local(frame, local, layout)?;
551 let op = if layout.is_zst() {
552 // Do not read from ZST, they might not be initialized
553 Operand::Immediate(Scalar::ZST.into())
555 *M::access_local(frame, local)?
557 Ok(OpTy { op, layout, align: Some(layout.align.abi) })
560 /// Every place can be read from, so we can turn any place into an operand.
561 /// This will definitely return `Indirect` if the place is a `Ptr`, i.e., this
562 /// will never actually read from memory.
566 place: &PlaceTy<'tcx, M::PointerTag>,
567 ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
568 let op = match **place {
569 Place::Ptr(mplace) => Operand::Indirect(mplace),
570 Place::Local { frame, local } => {
571 *self.local_to_op(&self.stack()[frame], local, None)?
574 Ok(OpTy { op, layout: place.layout, align: Some(place.align) })
577 /// Evaluate a place with the goal of reading from it. This lets us sometimes
578 /// avoid allocations.
579 pub fn eval_place_to_op(
581 place: mir::Place<'tcx>,
582 layout: Option<TyAndLayout<'tcx>>,
583 ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
584 // Do not use the layout passed in as argument if the base we are looking at
585 // here is not the entire place.
586 let layout = if place.projection.is_empty() { layout } else { None };
588 let base_op = self.local_to_op(self.frame(), place.local, layout)?;
593 .try_fold(base_op, |op, elem| self.operand_projection(&op, elem))?;
595 trace!("eval_place_to_op: got {:?}", *op);
596 // Sanity-check the type we ended up with.
597 debug_assert!(mir_assign_valid_types(
600 self.layout_of(self.subst_from_current_frame_and_normalize_erasing_regions(
601 place.ty(&self.frame().body.local_decls, *self.tcx).ty
608 /// Evaluate the operand, returning a place where you can then find the data.
609 /// If you already know the layout, you can save two table lookups
610 /// by passing it in here.
614 mir_op: &mir::Operand<'tcx>,
615 layout: Option<TyAndLayout<'tcx>>,
616 ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
617 use rustc_middle::mir::Operand::*;
618 let op = match *mir_op {
619 // FIXME: do some more logic on `move` to invalidate the old location
620 Copy(place) | Move(place) => self.eval_place_to_op(place, layout)?,
622 Constant(ref constant) => {
624 self.subst_from_current_frame_and_normalize_erasing_regions(constant.literal)?;
626 // This can still fail:
627 // * During ConstProp, with `TooGeneric` or since the `required_consts` were not all
629 // * During CTFE, since promoteds in `const`/`static` initializer bodies can fail.
630 self.mir_const_to_op(&val, layout)?
633 trace!("{:?}: {:?}", mir_op, *op);
637 /// Evaluate a bunch of operands at once
638 pub(super) fn eval_operands(
640 ops: &[mir::Operand<'tcx>],
641 ) -> InterpResult<'tcx, Vec<OpTy<'tcx, M::PointerTag>>> {
642 ops.iter().map(|op| self.eval_operand(op, None)).collect()
645 // Used when the miri-engine runs into a constant and for extracting information from constants
646 // in patterns via the `const_eval` module
647 /// The `val` and `layout` are assumed to already be in our interpreter
648 /// "universe" (param_env).
652 layout: Option<TyAndLayout<'tcx>>,
653 ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
655 ty::ConstKind::Param(_) | ty::ConstKind::Bound(..) => throw_inval!(TooGeneric),
656 ty::ConstKind::Error(DelaySpanBugEmitted { reported, .. }) => {
657 throw_inval!(AlreadyReported(reported))
659 ty::ConstKind::Unevaluated(uv) => {
660 let instance = self.resolve(uv.def, uv.substs)?;
661 Ok(self.eval_to_allocation(GlobalId { instance, promoted: uv.promoted })?.into())
663 ty::ConstKind::Infer(..) | ty::ConstKind::Placeholder(..) => {
664 span_bug!(self.cur_span(), "const_to_op: Unexpected ConstKind {:?}", c)
666 ty::ConstKind::Value(valtree) => {
668 let const_val = self.tcx.valtree_to_const_val((ty, valtree));
669 self.const_val_to_op(const_val, ty, layout)
674 pub fn mir_const_to_op(
676 val: &mir::ConstantKind<'tcx>,
677 layout: Option<TyAndLayout<'tcx>>,
678 ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
680 mir::ConstantKind::Ty(ct) => self.const_to_op(*ct, layout),
681 mir::ConstantKind::Val(val, ty) => self.const_val_to_op(*val, *ty, layout),
685 pub(crate) fn const_val_to_op(
687 val_val: ConstValue<'tcx>,
689 layout: Option<TyAndLayout<'tcx>>,
690 ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
691 // Other cases need layout.
692 let tag_scalar = |scalar| -> InterpResult<'tcx, _> {
694 Scalar::Ptr(ptr, size) => Scalar::Ptr(self.global_base_pointer(ptr)?, size),
695 Scalar::Int(int) => Scalar::Int(int),
698 let layout = from_known_layout(self.tcx, self.param_env, layout, || self.layout_of(ty))?;
699 let op = match val_val {
700 ConstValue::ByRef { alloc, offset } => {
701 let id = self.tcx.create_memory_alloc(alloc);
702 // We rely on mutability being set correctly in that allocation to prevent writes
703 // where none should happen.
704 let ptr = self.global_base_pointer(Pointer::new(id, offset))?;
705 Operand::Indirect(MemPlace::from_ptr(ptr.into()))
707 ConstValue::Scalar(x) => Operand::Immediate(tag_scalar(x)?.into()),
708 ConstValue::Slice { data, start, end } => {
709 // We rely on mutability being set correctly in `data` to prevent writes
710 // where none should happen.
711 let ptr = Pointer::new(
712 self.tcx.create_memory_alloc(data),
713 Size::from_bytes(start), // offset: `start`
715 Operand::Immediate(Immediate::new_slice(
716 Scalar::from_pointer(self.global_base_pointer(ptr)?, &*self.tcx),
717 u64::try_from(end.checked_sub(start).unwrap()).unwrap(), // len: `end - start`
722 Ok(OpTy { op, layout, align: Some(layout.align.abi) })
725 /// Read discriminant, return the runtime value as well as the variant index.
726 /// Can also legally be called on non-enums (e.g. through the discriminant_value intrinsic)!
727 pub fn read_discriminant(
729 op: &OpTy<'tcx, M::PointerTag>,
730 ) -> InterpResult<'tcx, (Scalar<M::PointerTag>, VariantIdx)> {
731 trace!("read_discriminant_value {:#?}", op.layout);
732 // Get type and layout of the discriminant.
733 let discr_layout = self.layout_of(op.layout.ty.discriminant_ty(*self.tcx))?;
734 trace!("discriminant type: {:?}", discr_layout.ty);
736 // We use "discriminant" to refer to the value associated with a particular enum variant.
737 // This is not to be confused with its "variant index", which just determines its position in the
738 // declared list of variants -- they can differ with explicitly assigned discriminants.
739 // We use "tag" to refer to how the discriminant is encoded in memory, which can be either
740 // straight-forward (`TagEncoding::Direct`) or with a niche (`TagEncoding::Niche`).
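// For example (illustrative): in `enum E { A = 10, B = 20 }`, variant `B` has variant index 1
// but discriminant 20. With a niche encoding such as `Option<&T>`, the tag is not even a
// separate integer: `None` is simply encoded as the null pointer value.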
741 let (tag_scalar_layout, tag_encoding, tag_field) = match op.layout.variants {
742 Variants::Single { index } => {
743 let discr = match op.layout.ty.discriminant_for_variant(*self.tcx, index) {
745 // This type actually has discriminants.
746 assert_eq!(discr.ty, discr_layout.ty);
747 Scalar::from_uint(discr.val, discr_layout.size)
750 // On a type without actual discriminants, variant is 0.
751 assert_eq!(index.as_u32(), 0);
752 Scalar::from_uint(index.as_u32(), discr_layout.size)
755 return Ok((discr, index));
757 Variants::Multiple { tag, ref tag_encoding, tag_field, .. } => {
758 (tag, tag_encoding, tag_field)
762 // There are *three* layouts that come into play here:
763 // - The discriminant has a type for typechecking. This is `discr_layout`, and is used for
764 // the `Scalar` we return.
765 // - The tag (encoded discriminant) has layout `tag_layout`. This is always an integer type,
766 // and used to interpret the value we read from the tag field.
767 // For the return value, a cast to `discr_layout` is performed.
768 // - The field storing the tag has a layout, which is very similar to `tag_layout` but
769 // may be a pointer. This is `tag_val.layout`; we just use it for sanity checks.
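// Concrete sketch (default `repr`; exact layout details are not guaranteed): for
// `enum E { A = 10, B = 20 }`, `discr_layout` is the layout of `isize` (the type of
// `E::A as isize`), while the tag only needs to distinguish two small values, so
// `tag_layout` would be a one-byte integer layout.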
771 // Get layout for tag.
772 let tag_layout = self.layout_of(tag_scalar_layout.primitive().to_int_ty(*self.tcx))?;
774 // Read tag and sanity-check `tag_layout`.
775 let tag_val = self.read_immediate(&self.operand_field(op, tag_field)?)?;
776 assert_eq!(tag_layout.size, tag_val.layout.size);
777 assert_eq!(tag_layout.abi.is_signed(), tag_val.layout.abi.is_signed());
778 trace!("tag value: {}", tag_val);
780 // Figure out which discriminant and variant this corresponds to.
781 Ok(match *tag_encoding {
782 TagEncoding::Direct => {
783 let scalar = tag_val.to_scalar()?;
784 // Generate a specific error if `tag_val` is not an integer.
785 // (`tag_bits` itself is only used for error messages below.)
786 let tag_bits = scalar
788 .map_err(|dbg_val| err_ub!(InvalidTag(dbg_val)))?
789 .assert_bits(tag_layout.size);
790 // Cast bits from tag layout to discriminant layout.
791 // After the checks we did above, this cannot fail, as
792 // discriminants are int-like.
794 self.cast_from_int_like(scalar, tag_val.layout, discr_layout.ty).unwrap();
795 let discr_bits = discr_val.assert_bits(discr_layout.size);
796 // Convert discriminant to variant index, and catch invalid discriminants.
797 let index = match *op.layout.ty.kind() {
799 adt.discriminants(*self.tcx).find(|(_, var)| var.val == discr_bits)
801 ty::Generator(def_id, substs, _) => {
802 let substs = substs.as_generator();
804 .discriminants(def_id, *self.tcx)
805 .find(|(_, var)| var.val == discr_bits)
807 _ => span_bug!(self.cur_span(), "tagged layout for non-adt non-generator"),
809 .ok_or_else(|| err_ub!(InvalidTag(Scalar::from_uint(tag_bits, tag_layout.size))))?;
810 // Return the cast value, and the index.
813 TagEncoding::Niche { dataful_variant, ref niche_variants, niche_start } => {
814 let tag_val = tag_val.to_scalar()?;
815 // Compute the variant this niche value/"tag" corresponds to. With niche layout,
816 // discriminant (encoded in niche/tag) and variant index are the same.
817 let variants_start = niche_variants.start().as_u32();
818 let variants_end = niche_variants.end().as_u32();
819 let variant = match tag_val.try_to_int() {
821 // So this is a pointer then, and casting to an int failed.
822 // Can only happen during CTFE.
823 // If the niche is just 0 and the ptr is not null, then we know this is
824 // okay. Everything else, we conservatively reject.
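// For example (a sketch): in `Option<&T>`, `None` is encoded as the null pointer
// (niche_start == 0 and a single niche variant), so a non-null pointer tag can only mean
// the dataful `Some` variant -- the one pointer case accepted here.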
825 let ptr_valid = niche_start == 0
826 && variants_start == variants_end
827 && !self.scalar_may_be_null(tag_val)?;
829 throw_ub!(InvalidTag(dbg_val))
834 let tag_bits = tag_bits.assert_bits(tag_layout.size);
835 // We need to use machine arithmetic to get the relative variant idx:
836 // variant_index_relative = tag_val - niche_start_val
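// Worked example (illustrative): with `niche_variants = 1..=2` and `niche_start = 5`, a tag
// value of 6 gives variant_index_relative = 6 - 5 = 1, which is <= variants_end - variants_start,
// so the absolute variant index is variants_start + 1 = 2.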
837 let tag_val = ImmTy::from_uint(tag_bits, tag_layout);
838 let niche_start_val = ImmTy::from_uint(niche_start, tag_layout);
839 let variant_index_relative_val =
840 self.binary_op(mir::BinOp::Sub, &tag_val, &niche_start_val)?;
841 let variant_index_relative = variant_index_relative_val
843 .assert_bits(tag_val.layout.size);
844 // Check if this is in the range that indicates an actual discriminant.
845 if variant_index_relative <= u128::from(variants_end - variants_start) {
846 let variant_index_relative = u32::try_from(variant_index_relative)
847 .expect("we checked that this fits into a u32");
848 // Then computing the absolute variant idx should not overflow any more.
849 let variant_index = variants_start
850 .checked_add(variant_index_relative)
851 .expect("overflow computing absolute variant idx");
852 let variants_len = op
856 .expect("tagged layout for non adt")
859 assert!(usize::try_from(variant_index).unwrap() < variants_len);
860 VariantIdx::from_u32(variant_index)
866 // Compute the size of the scalar we need to return.
867 // No need to cast, because the variant index directly serves as discriminant and is
868 // encoded in the tag.
869 (Scalar::from_uint(variant.as_u32(), discr_layout.size), variant)