//! Functions concerning immediate values and operands, and reading from operands.
//! All high-level functions to read from memory work on operands as sources.

use std::fmt::Write;

use rustc_hir::def::Namespace;
use rustc_middle::ty::layout::{LayoutOf, PrimitiveExt, TyAndLayout};
use rustc_middle::ty::print::{FmtPrinter, PrettyPrinter, Printer};
use rustc_middle::ty::{ConstInt, DelaySpanBugEmitted, Ty};
use rustc_middle::{mir, ty};
use rustc_target::abi::{self, Abi, Align, HasDataLayout, Size, TagEncoding};
use rustc_target::abi::{VariantIdx, Variants};

use super::{
    alloc_range, from_known_layout, mir_assign_valid_types, AllocId, ConstValue, Frame, GlobalId,
    InterpCx, InterpResult, MPlaceTy, Machine, MemPlace, MemPlaceMeta, Place, PlaceTy, Pointer,
    Provenance, Scalar, ScalarMaybeUninit,
};

/// An `Immediate` represents a single immediate self-contained Rust value.
///
/// For optimization of a few very common cases, there is also a representation for a pair of
/// primitive values (`ScalarPair`). It allows Miri to avoid making allocations for checked binary
/// operations and wide pointers. This idea was taken from rustc's codegen.
/// In particular, thanks to `ScalarPair`, arithmetic operations and casts can be entirely
/// defined on `Immediate`, and do not have to work with a `Place`.
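///
/// For example, a wide pointer like `&str` or `&[u8]` is a `ScalarPair` of the data pointer
/// and the length, built without any extra allocation (an illustrative sketch; `data_ptr`,
/// `len`, and `cx` are assumed bindings, not part of this API):
/// ```ignore (illustrative)
/// let imm = Immediate::new_slice(data_ptr, len, cx);
/// ```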
#[derive(Copy, Clone, Debug)]
pub enum Immediate<Prov: Provenance = AllocId> {
    /// A single scalar value (must have *initialized* `Scalar` ABI).
    /// FIXME: we also currently often use this for ZST.
    /// `ScalarMaybeUninit` should reject ZST, and we should use `Uninit` for them instead.
    Scalar(ScalarMaybeUninit<Prov>),
    /// A pair of two scalar values (must have `ScalarPair` ABI where both fields are
    /// `Scalar::Initialized`).
    ScalarPair(ScalarMaybeUninit<Prov>, ScalarMaybeUninit<Prov>),
    /// A value of fully uninitialized memory. Can have arbitrary size and layout.
    Uninit,
}

impl<Prov: Provenance> From<ScalarMaybeUninit<Prov>> for Immediate<Prov> {
    #[inline(always)]
    fn from(val: ScalarMaybeUninit<Prov>) -> Self {
        Immediate::Scalar(val)
    }
}

impl<Prov: Provenance> From<Scalar<Prov>> for Immediate<Prov> {
    #[inline(always)]
    fn from(val: Scalar<Prov>) -> Self {
        Immediate::Scalar(val.into())
    }
}

impl<'tcx, Prov: Provenance> Immediate<Prov> {
    pub fn from_pointer(p: Pointer<Prov>, cx: &impl HasDataLayout) -> Self {
        Immediate::Scalar(ScalarMaybeUninit::from_pointer(p, cx))
    }

    pub fn from_maybe_pointer(p: Pointer<Option<Prov>>, cx: &impl HasDataLayout) -> Self {
        Immediate::Scalar(ScalarMaybeUninit::from_maybe_pointer(p, cx))
    }

    pub fn new_slice(val: Scalar<Prov>, len: u64, cx: &impl HasDataLayout) -> Self {
        Immediate::ScalarPair(val.into(), Scalar::from_machine_usize(len, cx).into())
    }

    pub fn new_dyn_trait(
        val: Scalar<Prov>,
        vtable: Pointer<Option<Prov>>,
        cx: &impl HasDataLayout,
    ) -> Self {
        Immediate::ScalarPair(val.into(), ScalarMaybeUninit::from_maybe_pointer(vtable, cx))
    }

    #[inline]
    #[cfg_attr(debug_assertions, track_caller)] // only in debug builds due to perf (see #98980)
    pub fn to_scalar_or_uninit(self) -> ScalarMaybeUninit<Prov> {
        match self {
            Immediate::Scalar(val) => val,
            Immediate::ScalarPair(..) => bug!("Got a scalar pair where a scalar was expected"),
            Immediate::Uninit => ScalarMaybeUninit::Uninit,
        }
    }

    #[inline]
    #[cfg_attr(debug_assertions, track_caller)] // only in debug builds due to perf (see #98980)
    pub fn to_scalar(self) -> InterpResult<'tcx, Scalar<Prov>> {
        self.to_scalar_or_uninit().check_init()
    }

    #[inline]
    #[cfg_attr(debug_assertions, track_caller)] // only in debug builds due to perf (see #98980)
    pub fn to_scalar_or_uninit_pair(self) -> (ScalarMaybeUninit<Prov>, ScalarMaybeUninit<Prov>) {
        match self {
            Immediate::ScalarPair(val1, val2) => (val1, val2),
            Immediate::Scalar(..) => bug!("Got a scalar where a scalar pair was expected"),
            Immediate::Uninit => (ScalarMaybeUninit::Uninit, ScalarMaybeUninit::Uninit),
        }
    }

    #[inline]
    #[cfg_attr(debug_assertions, track_caller)] // only in debug builds due to perf (see #98980)
    pub fn to_scalar_pair(self) -> InterpResult<'tcx, (Scalar<Prov>, Scalar<Prov>)> {
        let (val1, val2) = self.to_scalar_or_uninit_pair();
        Ok((val1.check_init()?, val2.check_init()?))
    }
}

// ScalarPair needs a type to interpret, so we often have an immediate and a type together
// as input for binary and cast operations.
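//
// For instance (an illustrative sketch; `u8_layout` stands for a `TyAndLayout<'tcx>` of `u8`
// obtained elsewhere, it is not part of this module):
//
//     let imm = ImmTy::from_uint(42u8, u8_layout);
//
// pairs the raw bits with the layout needed to later cast, compare, or print the value.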
#[derive(Clone, Debug)]
pub struct ImmTy<'tcx, Prov: Provenance = AllocId> {
    imm: Immediate<Prov>,
    pub layout: TyAndLayout<'tcx>,
}

impl<Prov: Provenance> std::fmt::Display for ImmTy<'_, Prov> {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        /// Helper function for printing a scalar to a FmtPrinter
        fn p<'a, 'tcx, Prov: Provenance>(
            cx: FmtPrinter<'a, 'tcx>,
            s: ScalarMaybeUninit<Prov>,
            ty: Ty<'tcx>,
        ) -> Result<FmtPrinter<'a, 'tcx>, std::fmt::Error> {
            match s {
                ScalarMaybeUninit::Scalar(Scalar::Int(int)) => {
                    cx.pretty_print_const_scalar_int(int, ty, true)
                }
                ScalarMaybeUninit::Scalar(Scalar::Ptr(ptr, _sz)) => {
                    // Just print the ptr value. `pretty_print_const_scalar_ptr` would also try to
                    // print what it points to, which would fail since it has no access to the local
                    // memory.
                    cx.pretty_print_const_pointer(ptr, ty, true)
                }
                ScalarMaybeUninit::Uninit => cx.typed_value(
                    |mut this| {
                        this.write_str("uninit ")?;
                        Ok(this)
                    },
                    |this| this.print_type(ty),
                    " ",
                ),
            }
        }
        ty::tls::with(|tcx| {
            match self.imm {
                Immediate::Scalar(s) => {
                    if let Some(ty) = tcx.lift(self.layout.ty) {
                        let cx = FmtPrinter::new(tcx, Namespace::ValueNS);
                        f.write_str(&p(cx, s, ty)?.into_buffer())?;
                        return Ok(());
                    }
                    write!(f, "{:x}: {}", s, self.layout.ty)
                }
                Immediate::ScalarPair(a, b) => {
                    // FIXME(oli-obk): at least print tuples and slices nicely
                    write!(f, "({:x}, {:x}): {}", a, b, self.layout.ty)
                }
                Immediate::Uninit => {
                    write!(f, "uninit: {}", self.layout.ty)
                }
            }
        })
    }
}

impl<'tcx, Prov: Provenance> std::ops::Deref for ImmTy<'tcx, Prov> {
    type Target = Immediate<Prov>;
    #[inline(always)]
    fn deref(&self) -> &Immediate<Prov> {
        &self.imm
    }
}

/// An `Operand` is the result of computing a `mir::Operand`. It can be immediate,
/// or still in memory. The latter is an optimization, to delay reading that chunk of
/// memory and to avoid having to store arbitrary-sized data here.
#[derive(Copy, Clone, Debug)]
pub enum Operand<Prov: Provenance = AllocId> {
    Immediate(Immediate<Prov>),
    Indirect(MemPlace<Prov>),
}

#[derive(Clone, Debug)]
pub struct OpTy<'tcx, Prov: Provenance = AllocId> {
    op: Operand<Prov>, // Keep this private; it helps enforce invariants.
    pub layout: TyAndLayout<'tcx>,
    /// rustc does not have a proper way to represent the type of a field of a `repr(packed)` struct:
    /// it needs to have a different alignment than the field type would usually have.
    /// So we represent this here with a separate field that "overwrites" `layout.align`.
    /// This means `layout.align` should never be used for an `OpTy`!
    /// `None` means "alignment does not matter since this is a by-value operand"
    /// (`Operand::Immediate`); this field is only relevant for `Operand::Indirect`.
    /// Also CTFE ignores alignment anyway, so this is for Miri only.
    pub align: Option<Align>,
}

impl<'tcx, Prov: Provenance> std::ops::Deref for OpTy<'tcx, Prov> {
    type Target = Operand<Prov>;
    #[inline(always)]
    fn deref(&self) -> &Operand<Prov> {
        &self.op
    }
}

impl<'tcx, Prov: Provenance> From<MPlaceTy<'tcx, Prov>> for OpTy<'tcx, Prov> {
    #[inline(always)]
    fn from(mplace: MPlaceTy<'tcx, Prov>) -> Self {
        OpTy { op: Operand::Indirect(*mplace), layout: mplace.layout, align: Some(mplace.align) }
    }
}

impl<'tcx, Prov: Provenance> From<&'_ MPlaceTy<'tcx, Prov>> for OpTy<'tcx, Prov> {
    #[inline(always)]
    fn from(mplace: &MPlaceTy<'tcx, Prov>) -> Self {
        OpTy { op: Operand::Indirect(**mplace), layout: mplace.layout, align: Some(mplace.align) }
    }
}

impl<'tcx, Prov: Provenance> From<&'_ mut MPlaceTy<'tcx, Prov>> for OpTy<'tcx, Prov> {
    #[inline(always)]
    fn from(mplace: &mut MPlaceTy<'tcx, Prov>) -> Self {
        OpTy { op: Operand::Indirect(**mplace), layout: mplace.layout, align: Some(mplace.align) }
    }
}

impl<'tcx, Prov: Provenance> From<ImmTy<'tcx, Prov>> for OpTy<'tcx, Prov> {
    #[inline(always)]
    fn from(val: ImmTy<'tcx, Prov>) -> Self {
        OpTy { op: Operand::Immediate(val.imm), layout: val.layout, align: None }
    }
}

impl<'tcx, Prov: Provenance> ImmTy<'tcx, Prov> {
    #[inline]
    pub fn from_scalar(val: Scalar<Prov>, layout: TyAndLayout<'tcx>) -> Self {
        ImmTy { imm: val.into(), layout }
    }

    #[inline]
    pub fn from_immediate(imm: Immediate<Prov>, layout: TyAndLayout<'tcx>) -> Self {
        ImmTy { imm, layout }
    }

    #[inline]
    pub fn uninit(layout: TyAndLayout<'tcx>) -> Self {
        ImmTy { imm: Immediate::Uninit, layout }
    }

    #[inline]
    pub fn try_from_uint(i: impl Into<u128>, layout: TyAndLayout<'tcx>) -> Option<Self> {
        Some(Self::from_scalar(Scalar::try_from_uint(i, layout.size)?, layout))
    }

    #[inline]
    pub fn from_uint(i: impl Into<u128>, layout: TyAndLayout<'tcx>) -> Self {
        Self::from_scalar(Scalar::from_uint(i, layout.size), layout)
    }

    #[inline]
    pub fn try_from_int(i: impl Into<i128>, layout: TyAndLayout<'tcx>) -> Option<Self> {
        Some(Self::from_scalar(Scalar::try_from_int(i, layout.size)?, layout))
    }

    #[inline]
    pub fn from_int(i: impl Into<i128>, layout: TyAndLayout<'tcx>) -> Self {
        Self::from_scalar(Scalar::from_int(i, layout.size), layout)
    }

    #[inline]
    pub fn to_const_int(self) -> ConstInt {
        assert!(self.layout.ty.is_integral());
        let int = self.to_scalar().expect("to_const_int doesn't work on scalar pairs").assert_int();
        ConstInt::new(int, self.layout.ty.is_signed(), self.layout.ty.is_ptr_sized_integral())
    }
}

impl<'tcx, Prov: Provenance> OpTy<'tcx, Prov> {
    pub fn len(&self, cx: &impl HasDataLayout) -> InterpResult<'tcx, u64> {
        if self.layout.is_unsized() {
            // There are no unsized immediates.
            self.assert_mem_place().len(cx)
        } else {
            match self.layout.fields {
                abi::FieldsShape::Array { count, .. } => Ok(count),
                _ => bug!("len not supported on sized type {:?}", self.layout.ty),
            }
        }
    }

    pub fn offset_with_meta(
        &self,
        offset: Size,
        meta: MemPlaceMeta<Prov>,
        layout: TyAndLayout<'tcx>,
        cx: &impl HasDataLayout,
    ) -> InterpResult<'tcx, Self> {
        match self.try_as_mplace() {
            Ok(mplace) => Ok(mplace.offset_with_meta(offset, meta, layout, cx)?.into()),
            Err(imm) => {
                assert!(
                    matches!(*imm, Immediate::Uninit),
                    "Scalar/ScalarPair cannot be offset into"
                );
                assert!(!meta.has_meta()); // no place to store metadata here
                // Every part of an uninit is uninit.
                Ok(ImmTy::uninit(layout).into())
            }
        }
    }

    pub fn offset(
        &self,
        offset: Size,
        layout: TyAndLayout<'tcx>,
        cx: &impl HasDataLayout,
    ) -> InterpResult<'tcx, Self> {
        assert!(!layout.is_unsized());
        self.offset_with_meta(offset, MemPlaceMeta::None, layout, cx)
    }
}

impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
    /// Try reading an immediate in memory; this is interesting particularly for `ScalarPair`.
    /// Returns `None` if the layout does not permit loading this as a value.
    ///
    /// This is an internal function; call `read_immediate` instead.
    fn read_immediate_from_mplace_raw(
        &self,
        mplace: &MPlaceTy<'tcx, M::Provenance>,
        force: bool,
    ) -> InterpResult<'tcx, Option<ImmTy<'tcx, M::Provenance>>> {
        if mplace.layout.is_unsized() {
            // Don't touch unsized
            return Ok(None);
        }

        let Some(alloc) = self.get_place_alloc(mplace)? else {
            // zero-sized type can be left uninit
            return Ok(Some(ImmTy::uninit(mplace.layout)));
        };

        // It may seem like all types with `Scalar` or `ScalarPair` ABI are fair game at this point.
        // However, `MaybeUninit<u64>` is considered a `Scalar` as far as its layout is concerned --
        // and yet cannot be represented by an interpreter `Scalar`, since we have to handle the
        // case where some of the bytes are initialized and others are not. So, we need an extra
        // check that walks over the type of `mplace` to make sure it is truly correct to treat this
        // like a `Scalar` (or `ScalarPair`).
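        // For example, a `MaybeUninit<u64>` value where only some of the 8 bytes are initialized
        // has no interpreter `Scalar` representation at all, so a plain load must be refused here
        // (unless `force` is set, in which case the load is lossy; see `read_immediate_raw`).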
        let scalar_layout = match mplace.layout.abi {
            // `if` does not work nested inside patterns, making this a bit awkward to express.
            Abi::Scalar(abi::Scalar::Initialized { value: s, .. }) => Some(s),
            Abi::Scalar(s) if force => Some(s.primitive()),
            _ => None,
        };
        if let Some(s) = scalar_layout {
            let size = s.size(self);
            assert_eq!(size, mplace.layout.size, "abi::Scalar size does not match layout size");
            let scalar = alloc
                .read_scalar(alloc_range(Size::ZERO, size), /*read_provenance*/ s.is_ptr())?;
            return Ok(Some(ImmTy { imm: scalar.into(), layout: mplace.layout }));
        }
        let scalar_pair_layout = match mplace.layout.abi {
            Abi::ScalarPair(
                abi::Scalar::Initialized { value: a, .. },
                abi::Scalar::Initialized { value: b, .. },
            ) => Some((a, b)),
            Abi::ScalarPair(a, b) if force => Some((a.primitive(), b.primitive())),
            _ => None,
        };
        if let Some((a, b)) = scalar_pair_layout {
            // We checked `ptr_align` above, so all fields will have the alignment they need.
            // We would anyway check against `ptr_align.restrict_for_offset(b_offset)`,
            // which `ptr.offset(b_offset)` cannot possibly fail to satisfy.
            let (a_size, b_size) = (a.size(self), b.size(self));
            let b_offset = a_size.align_to(b.align(self).abi);
            assert!(b_offset.bytes() > 0); // in `operand_field` we use the offset to tell apart the fields
            let a_val = alloc.read_scalar(
                alloc_range(Size::ZERO, a_size),
                /*read_provenance*/ a.is_ptr(),
            )?;
            let b_val = alloc
                .read_scalar(alloc_range(b_offset, b_size), /*read_provenance*/ b.is_ptr())?;
            return Ok(Some(ImmTy {
                imm: Immediate::ScalarPair(a_val, b_val),
                layout: mplace.layout,
            }));
        }
        // Neither a scalar nor scalar pair.
        Ok(None)
    }

    /// Try returning an immediate for the operand. If the layout does not permit loading this as an
    /// immediate, return where in memory we can find the data.
    /// Note that for a given layout, this operation will either always fail or always
    /// succeed! Whether it succeeds depends on whether the layout can be represented
    /// in an `Immediate`, not on which data is stored there currently.
    ///
    /// If `force` is `true`, then even scalars with fields that can be uninit will be
    /// read. This means the load is lossy and should not be written back!
    /// This flag exists only for validity checking.
    ///
    /// This is an internal function that should not usually be used; call `read_immediate` instead.
    /// ConstProp needs it, though.
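    ///
    /// An illustrative call pattern (a sketch; `ecx` and `op` are assumed bindings for an
    /// `InterpCx` and an `OpTy`, not part of this API):
    /// ```ignore (illustrative)
    /// match ecx.read_immediate_raw(&op, /*force*/ false)? {
    ///     Ok(imm) => { /* the operand fits in an `ImmTy` */ }
    ///     Err(mplace) => { /* the data must stay in memory at `mplace` */ }
    /// }
    /// ```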
    pub fn read_immediate_raw(
        &self,
        src: &OpTy<'tcx, M::Provenance>,
        force: bool,
    ) -> InterpResult<'tcx, Result<ImmTy<'tcx, M::Provenance>, MPlaceTy<'tcx, M::Provenance>>> {
        Ok(match src.try_as_mplace() {
            Ok(ref mplace) => {
                if let Some(val) = self.read_immediate_from_mplace_raw(mplace, force)? {
                    Ok(val)
                } else {
                    Err(*mplace)
                }
            }
            Err(val) => Ok(val),
        })
    }

    /// Read an immediate from a place, asserting that this is possible with the given layout.
    #[inline(always)]
    pub fn read_immediate(
        &self,
        op: &OpTy<'tcx, M::Provenance>,
    ) -> InterpResult<'tcx, ImmTy<'tcx, M::Provenance>> {
        if let Ok(imm) = self.read_immediate_raw(op, /*force*/ false)? {
            Ok(imm)
        } else {
            span_bug!(self.cur_span(), "primitive read failed for type: {:?}", op.layout.ty);
        }
    }

    /// Read a scalar from a place.
    pub fn read_scalar(
        &self,
        op: &OpTy<'tcx, M::Provenance>,
    ) -> InterpResult<'tcx, ScalarMaybeUninit<M::Provenance>> {
        Ok(self.read_immediate(op)?.to_scalar_or_uninit())
    }

    /// Read a pointer from a place.
    pub fn read_pointer(
        &self,
        op: &OpTy<'tcx, M::Provenance>,
    ) -> InterpResult<'tcx, Pointer<Option<M::Provenance>>> {
        self.read_scalar(op)?.to_pointer(self)
    }

    /// Turn the wide MPlace into a string (must already be dereferenced!).
    pub fn read_str(&self, mplace: &MPlaceTy<'tcx, M::Provenance>) -> InterpResult<'tcx, &str> {
        let len = mplace.len(self)?;
        let bytes = self.read_bytes_ptr(mplace.ptr, Size::from_bytes(len))?;
        let str = std::str::from_utf8(bytes).map_err(|err| err_ub!(InvalidStr(err)))?;
        Ok(str)
    }

    /// Converts a repr(simd) operand into an operand where `place_index` accesses the SIMD elements.
    /// Also returns the number of elements.
    ///
    /// Can (but does not always) trigger UB if `op` is uninitialized.
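    ///
    /// An illustrative call (a sketch; `ecx` and a `repr(simd)`-typed `op` are assumed):
    /// ```ignore (illustrative)
    /// let (elems, len) = ecx.operand_to_simd(&op)?;
    /// // `elems` is an in-memory array place with `len` elements, indexable via `place_index`.
    /// ```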
    pub fn operand_to_simd(
        &self,
        op: &OpTy<'tcx, M::Provenance>,
    ) -> InterpResult<'tcx, (MPlaceTy<'tcx, M::Provenance>, u64)> {
        // Basically we just transmute this place into an array following simd_size_and_type.
        // This only works in memory, but repr(simd) types should never be immediates anyway.
        assert!(op.layout.ty.is_simd());
        match op.try_as_mplace() {
            Ok(mplace) => self.mplace_to_simd(&mplace),
            Err(imm) => match *imm {
                Immediate::Uninit => {
                    throw_ub!(InvalidUninitBytes(None))
                }
                Immediate::Scalar(..) | Immediate::ScalarPair(..) => {
                    bug!("arrays/slices can never have Scalar/ScalarPair layout")
                }
            },
        }
    }

    /// Read from a local. Will not actually access the local if reading from a ZST.
    /// Will not access memory, instead an indirect `Operand` is returned.
    ///
    /// This is public because it is used by [priroda](https://github.com/oli-obk/priroda) to get an
    /// OpTy from a local.
    pub fn local_to_op(
        &self,
        frame: &Frame<'mir, 'tcx, M::Provenance, M::FrameExtra>,
        local: mir::Local,
        layout: Option<TyAndLayout<'tcx>>,
    ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
        let layout = self.layout_of_local(frame, local, layout)?;
        let op = if layout.is_zst() {
            // Bypass `access_local` (helps in ConstProp)
            Operand::Immediate(Immediate::Uninit)
        } else {
            *M::access_local(frame, local)?
        };
        Ok(OpTy { op, layout, align: Some(layout.align.abi) })
    }

    /// Every place can be read from, so we can turn any place into an operand.
    /// This will definitely return `Indirect` if the place is a `Ptr`, i.e., this
    /// will never actually read from memory.
    #[inline(always)]
    pub fn place_to_op(
        &self,
        place: &PlaceTy<'tcx, M::Provenance>,
    ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
        let op = match **place {
            Place::Ptr(mplace) => Operand::Indirect(mplace),
            Place::Local { frame, local } => {
                *self.local_to_op(&self.stack()[frame], local, None)?
            }
        };
        Ok(OpTy { op, layout: place.layout, align: Some(place.align) })
    }

    /// Evaluate a place with the goal of reading from it. This lets us sometimes
    /// avoid allocations.
    pub fn eval_place_to_op(
        &self,
        mir_place: mir::Place<'tcx>,
        layout: Option<TyAndLayout<'tcx>>,
    ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
        // Do not use the layout passed in as argument if the base we are looking at
        // here is not the entire place.
        let layout = if mir_place.projection.is_empty() { layout } else { None };

        let mut op = self.local_to_op(self.frame(), mir_place.local, layout)?;
        // Using `try_fold` turned out to be bad for performance, hence the loop.
        for elem in mir_place.projection.iter() {
            op = self.operand_projection(&op, elem)?
        }

        trace!("eval_place_to_op: got {:?}", *op);
        // Sanity-check the type we ended up with.
        debug_assert!(
            mir_assign_valid_types(
                *self.tcx,
                self.param_env,
                self.layout_of(self.subst_from_current_frame_and_normalize_erasing_regions(
                    mir_place.ty(&self.frame().body.local_decls, *self.tcx).ty
                )?)?,
                op.layout,
            ),
            "eval_place of a MIR place with type {:?} produced an interpreter operand with type {:?}",
            mir_place.ty(&self.frame().body.local_decls, *self.tcx).ty,
            op.layout.ty,
        );
        Ok(op)
    }

    /// Evaluate the operand, returning a place where you can then find the data.
    /// If you already know the layout, you can save two table lookups
    /// by passing it in here.
    #[inline]
    pub fn eval_operand(
        &self,
        mir_op: &mir::Operand<'tcx>,
        layout: Option<TyAndLayout<'tcx>>,
    ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
        use rustc_middle::mir::Operand::*;
        let op = match *mir_op {
            // FIXME: do some more logic on `move` to invalidate the old location
            Copy(place) | Move(place) => self.eval_place_to_op(place, layout)?,

            Constant(ref constant) => {
                let val =
                    self.subst_from_current_frame_and_normalize_erasing_regions(constant.literal)?;

                // This can still fail:
                // * During ConstProp, with `TooGeneric` or since the `required_consts` were not all
                //   checked yet.
                // * During CTFE, since promoteds in `const`/`static` initializer bodies can fail.
                self.mir_const_to_op(&val, layout)?
            }
        };
        trace!("{:?}: {:?}", mir_op, *op);
        Ok(op)
    }

    /// Evaluate a bunch of operands at once.
    pub(super) fn eval_operands(
        &self,
        ops: &[mir::Operand<'tcx>],
    ) -> InterpResult<'tcx, Vec<OpTy<'tcx, M::Provenance>>> {
        ops.iter().map(|op| self.eval_operand(op, None)).collect()
    }

    // Used when the miri-engine runs into a constant and for extracting information from constants
    // in patterns via the `const_eval` module.
    /// The `val` and `layout` are assumed to already be in our interpreter
    /// "universe" (param_env).
    pub fn const_to_op(
        &self,
        c: ty::Const<'tcx>,
        layout: Option<TyAndLayout<'tcx>>,
    ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
        match c.kind() {
            ty::ConstKind::Param(_) | ty::ConstKind::Bound(..) => throw_inval!(TooGeneric),
            ty::ConstKind::Error(DelaySpanBugEmitted { reported, .. }) => {
                throw_inval!(AlreadyReported(reported))
            }
            ty::ConstKind::Unevaluated(uv) => {
                let instance = self.resolve(uv.def, uv.substs)?;
                Ok(self.eval_to_allocation(GlobalId { instance, promoted: uv.promoted })?.into())
            }
            ty::ConstKind::Infer(..) | ty::ConstKind::Placeholder(..) => {
                span_bug!(self.cur_span(), "const_to_op: Unexpected ConstKind {:?}", c)
            }
            ty::ConstKind::Value(valtree) => {
                let ty = c.ty();
                let const_val = self.tcx.valtree_to_const_val((ty, valtree));
                self.const_val_to_op(const_val, ty, layout)
            }
        }
    }

    pub fn mir_const_to_op(
        &self,
        val: &mir::ConstantKind<'tcx>,
        layout: Option<TyAndLayout<'tcx>>,
    ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
        match val {
            mir::ConstantKind::Ty(ct) => self.const_to_op(*ct, layout),
            mir::ConstantKind::Val(val, ty) => self.const_val_to_op(*val, *ty, layout),
        }
    }

    pub(crate) fn const_val_to_op(
        &self,
        val_val: ConstValue<'tcx>,
        ty: Ty<'tcx>,
        layout: Option<TyAndLayout<'tcx>>,
    ) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
        // Other cases need layout.
        let adjust_scalar = |scalar| -> InterpResult<'tcx, _> {
            Ok(match scalar {
                Scalar::Ptr(ptr, size) => Scalar::Ptr(self.global_base_pointer(ptr)?, size),
                Scalar::Int(int) => Scalar::Int(int),
            })
        };
        let layout = from_known_layout(self.tcx, self.param_env, layout, || self.layout_of(ty))?;
        let op = match val_val {
            ConstValue::ByRef { alloc, offset } => {
                let id = self.tcx.create_memory_alloc(alloc);
                // We rely on mutability being set correctly in that allocation to prevent writes
                // where none should happen.
                let ptr = self.global_base_pointer(Pointer::new(id, offset))?;
                Operand::Indirect(MemPlace::from_ptr(ptr.into()))
            }
            ConstValue::Scalar(x) => Operand::Immediate(adjust_scalar(x)?.into()),
            ConstValue::ZeroSized => Operand::Immediate(Immediate::Uninit),
            ConstValue::Slice { data, start, end } => {
                // We rely on mutability being set correctly in `data` to prevent writes
                // where none should happen.
                let ptr = Pointer::new(
                    self.tcx.create_memory_alloc(data),
                    Size::from_bytes(start), // offset: `start`
                );
                Operand::Immediate(Immediate::new_slice(
                    Scalar::from_pointer(self.global_base_pointer(ptr)?, &*self.tcx),
                    u64::try_from(end.checked_sub(start).unwrap()).unwrap(), // len: `end - start`
                    self,
                ))
            }
        };
        Ok(OpTy { op, layout, align: Some(layout.align.abi) })
    }

    /// Read discriminant, return the runtime value as well as the variant index.
    /// Can also legally be called on non-enums (e.g. through the discriminant_value intrinsic)!
    pub fn read_discriminant(
        &self,
        op: &OpTy<'tcx, M::Provenance>,
    ) -> InterpResult<'tcx, (Scalar<M::Provenance>, VariantIdx)> {
        trace!("read_discriminant_value {:#?}", op.layout);
        // Get type and layout of the discriminant.
        let discr_layout = self.layout_of(op.layout.ty.discriminant_ty(*self.tcx))?;
        trace!("discriminant type: {:?}", discr_layout.ty);

        // We use "discriminant" to refer to the value associated with a particular enum variant.
        // This is not to be confused with its "variant index", which just determines its position
        // in the declared list of variants -- they can differ with explicitly assigned discriminants.
        // We use "tag" to refer to how the discriminant is encoded in memory, which can be either
        // straight-forward (`TagEncoding::Direct`) or with a niche (`TagEncoding::Niche`).
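        // For example, in `enum E { A = 10, B = 20 }`, variant `B` has variant index 1 but
        // discriminant 20; with `TagEncoding::Direct`, the value 20 is what is stored as the tag.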
        let (tag_scalar_layout, tag_encoding, tag_field) = match op.layout.variants {
            Variants::Single { index } => {
                let discr = match op.layout.ty.discriminant_for_variant(*self.tcx, index) {
                    Some(discr) => {
                        // This type actually has discriminants.
                        assert_eq!(discr.ty, discr_layout.ty);
                        Scalar::from_uint(discr.val, discr_layout.size)
                    }
                    None => {
                        // On a type without actual discriminants, variant is 0.
                        assert_eq!(index.as_u32(), 0);
                        Scalar::from_uint(index.as_u32(), discr_layout.size)
                    }
                };
                return Ok((discr, index));
            }
            Variants::Multiple { tag, ref tag_encoding, tag_field, .. } => {
                (tag, tag_encoding, tag_field)
            }
        };

        // There are *three* layouts that come into play here:
        // - The discriminant has a type for typechecking. This is `discr_layout`, and is used for
        //   the `Scalar` we return.
        // - The tag (encoded discriminant) has layout `tag_layout`. This is always an integer type,
        //   and used to interpret the value we read from the tag field.
        //   For the return value, a cast to `discr_layout` is performed.
        // - The field storing the tag has a layout, which is very similar to `tag_layout` but
        //   may be a pointer. This is `tag_val.layout`; we just use it for sanity checks.

        // Get layout for tag.
        let tag_layout = self.layout_of(tag_scalar_layout.primitive().to_int_ty(*self.tcx))?;

        // Read tag and sanity-check `tag_layout`.
        let tag_val = self.read_immediate(&self.operand_field(op, tag_field)?)?;
        assert_eq!(tag_layout.size, tag_val.layout.size);
        assert_eq!(tag_layout.abi.is_signed(), tag_val.layout.abi.is_signed());
        trace!("tag value: {}", tag_val);

        // Figure out which discriminant and variant this corresponds to.
        Ok(match *tag_encoding {
            TagEncoding::Direct => {
                let scalar = tag_val.to_scalar()?;
                // Generate a specific error if `tag_val` is not an integer.
                // (`tag_bits` itself is only used for error messages below.)
                let tag_bits = scalar
                    .try_to_int()
                    .map_err(|dbg_val| err_ub!(InvalidTag(dbg_val)))?
                    .assert_bits(tag_layout.size);
                // Cast bits from tag layout to discriminant layout.
                // After the checks we did above, this cannot fail, as
                // discriminants are int-like.
                let discr_val =
                    self.cast_from_int_like(scalar, tag_val.layout, discr_layout.ty).unwrap();
                let discr_bits = discr_val.assert_bits(discr_layout.size);
                // Convert discriminant to variant index, and catch invalid discriminants.
                let index = match *op.layout.ty.kind() {
                    ty::Adt(adt, _) => {
                        adt.discriminants(*self.tcx).find(|(_, var)| var.val == discr_bits)
                    }
                    ty::Generator(def_id, substs, _) => {
                        let substs = substs.as_generator();
                        substs
                            .discriminants(def_id, *self.tcx)
                            .find(|(_, var)| var.val == discr_bits)
                    }
                    _ => span_bug!(self.cur_span(), "tagged layout for non-adt non-generator"),
                }
                .ok_or_else(|| err_ub!(InvalidTag(Scalar::from_uint(tag_bits, tag_layout.size))))?;
                // Return the cast value, and the index.
                (discr_val, index.0)
            }
            TagEncoding::Niche { dataful_variant, ref niche_variants, niche_start } => {
                let tag_val = tag_val.to_scalar()?;
                // Compute the variant this niche value/"tag" corresponds to. With niche layout,
                // discriminant (encoded in niche/tag) and variant index are the same.
                let variants_start = niche_variants.start().as_u32();
                let variants_end = niche_variants.end().as_u32();
                let variant = match tag_val.try_to_int() {
                    Err(dbg_val) => {
                        // So this is a pointer then, and casting to an int failed.
                        // Can only happen during CTFE.
                        // The niche must be just 0, and the ptr not null, then we know this is
                        // okay. Everything else, we conservatively reject.
                        let ptr_valid = niche_start == 0
                            && variants_start == variants_end
                            && !self.scalar_may_be_null(tag_val)?;
                        if !ptr_valid {
                            throw_ub!(InvalidTag(dbg_val))
                        }
                        dataful_variant
                    }
                    Ok(tag_bits) => {
                        let tag_bits = tag_bits.assert_bits(tag_layout.size);
                        // We need to use machine arithmetic to get the relative variant idx:
                        // variant_index_relative = tag_val - niche_start_val
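                        // E.g. with a `u8` tag, `niche_start == 252`, and a stored tag of 254,
                        // the relative variant index is `254 - 252 == 2`; the subtraction wraps
                        // at tag-layout width, which is why `binary_op` is used here.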
                        let tag_val = ImmTy::from_uint(tag_bits, tag_layout);
                        let niche_start_val = ImmTy::from_uint(niche_start, tag_layout);
                        let variant_index_relative_val =
                            self.binary_op(mir::BinOp::Sub, &tag_val, &niche_start_val)?;
                        let variant_index_relative = variant_index_relative_val
                            .to_scalar()?
                            .assert_bits(tag_val.layout.size);
                        // Check if this is in the range that indicates an actual discriminant.
                        if variant_index_relative <= u128::from(variants_end - variants_start) {
                            let variant_index_relative = u32::try_from(variant_index_relative)
                                .expect("we checked that this fits into a u32");
                            // Then computing the absolute variant idx should not overflow any more.
                            let variant_index = variants_start
                                .checked_add(variant_index_relative)
                                .expect("overflow computing absolute variant idx");
                            let variants_len = op
                                .layout
                                .ty
                                .ty_adt_def()
                                .expect("tagged layout for non adt")
                                .variants()
                                .len();
                            assert!(usize::try_from(variant_index).unwrap() < variants_len);
                            VariantIdx::from_u32(variant_index)
                        } else {
                            dataful_variant
                        }
                    }
                };
                // Compute the size of the scalar we need to return.
                // No need to cast, because the variant index directly serves as discriminant and is
                // encoded in the tag.
                (Scalar::from_uint(variant.as_u32(), discr_layout.size), variant)
            }
        })
    }
}

// Some nodes are used a lot. Make sure they don't unintentionally get bigger.
#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
mod size_asserts {
    use super::*;
    // These are in alphabetical order, which is easy to maintain.
    rustc_data_structures::static_assert_size!(Immediate, 56);
    rustc_data_structures::static_assert_size!(ImmTy<'_>, 72);
    rustc_data_structures::static_assert_size!(Operand, 64);
    rustc_data_structures::static_assert_size!(OpTy<'_>, 88);
}