1 //! Functions concerning immediate values and operands, and reading from operands.
2 //! All high-level functions to read from memory work on operands as sources.
4 use std::convert::{TryFrom, TryInto};
6 use rustc::ty::layout::{
7 self, HasDataLayout, IntegerExt, LayoutOf, PrimitiveExt, Size, TyLayout, VariantIdx,
11 use super::{InterpCx, MPlaceTy, Machine, MemPlace, Place, PlaceTy};
12 pub use rustc::mir::interpret::ScalarMaybeUndef;
13 use rustc::mir::interpret::{
14 sign_extend, truncate, AllocId, ConstValue, GlobalId, InterpResult, Pointer, Scalar,
16 use rustc_macros::HashStable;
19 /// An `Immediate` represents a single immediate self-contained Rust value.
21 /// For optimization of a few very common cases, there is also a representation for a pair of
22 /// primitive values (`ScalarPair`). It allows Miri to avoid making allocations for checked binary
23 /// operations and wide pointers. This idea was taken from rustc's codegen.
24 /// In particular, thanks to `ScalarPair`, arithmetic operations and casts can be entirely
25 /// defined on `Immediate`, and do not have to work with a `Place`.
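/// A hedged illustration (marked `ignore` since these are rustc-internal types): a slice
/// reference needs no allocation and can be built as a pair of data pointer and length,
/// e.g. via the `new_slice` helper below.
///
/// ```ignore
/// // Assumed to be at hand: `data_ptr: Scalar<Tag>`, `len: u64`, `cx: &impl HasDataLayout`.
/// let wide = Immediate::new_slice(data_ptr, len, cx);
/// // ...which is the same as building the pair by hand:
/// let wide = Immediate::ScalarPair(
///     data_ptr.into(),
///     Scalar::from_uint(len, cx.data_layout().pointer_size).into(),
/// );
/// ```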
26 #[derive(Copy, Clone, Debug, PartialEq, Eq, HashStable, Hash)]
27 pub enum Immediate<Tag = (), Id = AllocId> {
28 Scalar(ScalarMaybeUndef<Tag, Id>),
29 ScalarPair(ScalarMaybeUndef<Tag, Id>, ScalarMaybeUndef<Tag, Id>),
32 impl<Tag> From<ScalarMaybeUndef<Tag>> for Immediate<Tag> {
34 fn from(val: ScalarMaybeUndef<Tag>) -> Self {
35 Immediate::Scalar(val)
39 impl<Tag> From<Scalar<Tag>> for Immediate<Tag> {
41 fn from(val: Scalar<Tag>) -> Self {
42 Immediate::Scalar(val.into())
46 impl<Tag> From<Pointer<Tag>> for Immediate<Tag> {
48 fn from(val: Pointer<Tag>) -> Self {
49 Immediate::Scalar(Scalar::from(val).into())
53 impl<'tcx, Tag> Immediate<Tag> {
54 pub fn new_slice(val: Scalar<Tag>, len: u64, cx: &impl HasDataLayout) -> Self {
55 Immediate::ScalarPair(
57 Scalar::from_uint(len, cx.data_layout().pointer_size).into(),
61 pub fn new_dyn_trait(val: Scalar<Tag>, vtable: Pointer<Tag>) -> Self {
62 Immediate::ScalarPair(val.into(), vtable.into())
66 pub fn to_scalar_or_undef(self) -> ScalarMaybeUndef<Tag> {
68 Immediate::Scalar(val) => val,
69 Immediate::ScalarPair(..) => bug!("Got a wide pointer where a scalar was expected"),
74 pub fn to_scalar(self) -> InterpResult<'tcx, Scalar<Tag>> {
75 self.to_scalar_or_undef().not_undef()
79 pub fn to_scalar_pair(self) -> InterpResult<'tcx, (Scalar<Tag>, Scalar<Tag>)> {
81 Immediate::Scalar(..) => bug!("Got a thin pointer where a scalar pair was expected"),
82 Immediate::ScalarPair(a, b) => Ok((a.not_undef()?, b.not_undef()?)),
87 // ScalarPair needs a type to interpret, so we often have an immediate and a type together
88 // as input for binary and cast operations.
89 #[derive(Copy, Clone, Debug)]
90 pub struct ImmTy<'tcx, Tag = ()> {
91 pub(crate) imm: Immediate<Tag>,
92 pub layout: TyLayout<'tcx>,
95 // `Tag: Copy` because some methods on `Scalar` consume the tag by value
96 impl<Tag: Copy> std::fmt::Display for ImmTy<'tcx, Tag> {
97 fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
99 Immediate::Scalar(ScalarMaybeUndef::Scalar(s)) => match s.to_bits(self.layout.size) {
101 match self.layout.ty.kind {
106 super::sign_extend(s, self.layout.size) as i128,
109 ty::Uint(_) => return write!(fmt, "{}", s),
110 ty::Bool if s == 0 => return fmt.write_str("false"),
111 ty::Bool if s == 1 => return fmt.write_str("true"),
113 if let Some(c) = u32::try_from(s).ok().and_then(std::char::from_u32) {
114 return write!(fmt, "{}", c);
117 ty::Float(ast::FloatTy::F32) => {
118 if let Ok(u) = u32::try_from(s) {
119 return write!(fmt, "{}", f32::from_bits(u));
122 ty::Float(ast::FloatTy::F64) => {
123 if let Ok(u) = u64::try_from(s) {
124 return write!(fmt, "{}", f64::from_bits(u));
129 write!(fmt, "{:x}", s)
131 Err(_) => fmt.write_str("{pointer}"),
133 Immediate::Scalar(ScalarMaybeUndef::Undef) => fmt.write_str("{undef}"),
134 Immediate::ScalarPair(..) => fmt.write_str("{wide pointer or tuple}"),
139 impl<'tcx, Tag> ::std::ops::Deref for ImmTy<'tcx, Tag> {
140 type Target = Immediate<Tag>;
142 fn deref(&self) -> &Immediate<Tag> {
147 /// An `Operand` is the result of computing a `mir::Operand`. It can be immediate,
148 /// or still in memory. The latter is an optimization, to delay reading that chunk of
149 /// memory and to avoid having to store arbitrary-sized data here.
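/// A hedged sketch of how the two cases are typically consumed (`use_value` and
/// `read_later` are hypothetical helpers, not part of this module):
///
/// ```ignore
/// match op {
///     // The value is already available as an immediate.
///     Operand::Immediate(imm) => use_value(imm),
///     // The value still sits in memory; reading it can be deferred.
///     Operand::Indirect(mplace) => read_later(mplace),
/// }
/// ```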
150 #[derive(Copy, Clone, Debug, PartialEq, Eq, HashStable, Hash)]
151 pub enum Operand<Tag = (), Id = AllocId> {
152 Immediate(Immediate<Tag, Id>),
153 Indirect(MemPlace<Tag, Id>),
156 impl<Tag> Operand<Tag> {
158 pub fn assert_mem_place(self) -> MemPlace<Tag>
160 Tag: ::std::fmt::Debug,
163 Operand::Indirect(mplace) => mplace,
164 _ => bug!("assert_mem_place: expected Operand::Indirect, got {:?}", self),
169 pub fn assert_immediate(self) -> Immediate<Tag>
171 Tag: ::std::fmt::Debug,
174 Operand::Immediate(imm) => imm,
175 _ => bug!("assert_immediate: expected Operand::Immediate, got {:?}", self),
180 #[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
181 pub struct OpTy<'tcx, Tag = ()> {
182 op: Operand<Tag>, // Keep this private; it helps enforce invariants.
183 pub layout: TyLayout<'tcx>,
186 impl<'tcx, Tag> ::std::ops::Deref for OpTy<'tcx, Tag> {
187 type Target = Operand<Tag>;
189 fn deref(&self) -> &Operand<Tag> {
194 impl<'tcx, Tag: Copy> From<MPlaceTy<'tcx, Tag>> for OpTy<'tcx, Tag> {
196 fn from(mplace: MPlaceTy<'tcx, Tag>) -> Self {
197 OpTy { op: Operand::Indirect(*mplace), layout: mplace.layout }
201 impl<'tcx, Tag> From<ImmTy<'tcx, Tag>> for OpTy<'tcx, Tag> {
203 fn from(val: ImmTy<'tcx, Tag>) -> Self {
204 OpTy { op: Operand::Immediate(val.imm), layout: val.layout }
208 impl<'tcx, Tag: Copy> ImmTy<'tcx, Tag> {
210 pub fn from_scalar(val: Scalar<Tag>, layout: TyLayout<'tcx>) -> Self {
211 ImmTy { imm: val.into(), layout }
215 pub fn try_from_uint(i: impl Into<u128>, layout: TyLayout<'tcx>) -> Option<Self> {
216 Some(Self::from_scalar(Scalar::try_from_uint(i, layout.size)?, layout))
219 pub fn from_uint(i: impl Into<u128>, layout: TyLayout<'tcx>) -> Self {
220 Self::from_scalar(Scalar::from_uint(i, layout.size), layout)
224 pub fn try_from_int(i: impl Into<i128>, layout: TyLayout<'tcx>) -> Option<Self> {
225 Some(Self::from_scalar(Scalar::try_from_int(i, layout.size)?, layout))
229 pub fn from_int(i: impl Into<i128>, layout: TyLayout<'tcx>) -> Self {
230 Self::from_scalar(Scalar::from_int(i, layout.size), layout)
234 pub fn to_bits(self) -> InterpResult<'tcx, u128> {
235 self.to_scalar()?.to_bits(self.layout.size)
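// A hedged round-trip sketch using the constructors above (`layout_of_u32` stands in
// for some `TyLayout<'tcx>` describing `u32`):
//
//     let imm = ImmTy::from_uint(42u32, layout_of_u32);
//     let bits = imm.to_bits()?; // reads back 42, interpreted at `layout.size`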
239 // Use the existing layout if given (but sanity check in debug mode),
240 // or compute the layout.
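// A minimal usage sketch (matching how `eval_const_to_op` below calls it):
//
//     let layout = from_known_layout(layout, || self.layout_of(val.ty))?;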
242 pub(super) fn from_known_layout<'tcx>(
243 layout: Option<TyLayout<'tcx>>,
244 compute: impl FnOnce() -> InterpResult<'tcx, TyLayout<'tcx>>,
245 ) -> InterpResult<'tcx, TyLayout<'tcx>> {
249 if cfg!(debug_assertions) {
250 let layout2 = compute()?;
252 layout.details, layout2.details,
253 "mismatch in layout of supposedly equal-layout types {:?} and {:?}",
254 layout.ty, layout2.ty
262 impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
263 /// Normalize `place.ptr` to a `Pointer` if this is a place and not a ZST.
264 /// Can be helpful to avoid lots of `force_ptr` calls later, if this place is used a lot.
268 op: OpTy<'tcx, M::PointerTag>,
269 ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
270 match op.try_as_mplace() {
271 Ok(mplace) => Ok(self.force_mplace_ptr(mplace)?.into()),
272 Err(imm) => Ok(imm.into()), // Nothing to cast/force
276 /// Try reading an immediate from memory; this is particularly interesting for `ScalarPair`.
277 /// Returns `None` if the layout does not permit loading this as a value.
278 fn try_read_immediate_from_mplace(
280 mplace: MPlaceTy<'tcx, M::PointerTag>,
281 ) -> InterpResult<'tcx, Option<ImmTy<'tcx, M::PointerTag>>> {
282 if mplace.layout.is_unsized() {
283 // Don't touch unsized
288 .check_mplace_access(mplace, None)
289 .expect("places should be checked on creation")
293 return Ok(Some(ImmTy {
295 imm: Scalar::zst().into(),
296 layout: mplace.layout,
301 match mplace.layout.abi {
302 layout::Abi::Scalar(..) => {
303 let scalar = self.memory.get_raw(ptr.alloc_id)?.read_scalar(
308 Ok(Some(ImmTy { imm: scalar.into(), layout: mplace.layout }))
310 layout::Abi::ScalarPair(ref a, ref b) => {
311 // We checked `ptr_align` above, so all fields will have the alignment they need.
312 // In any case we would check against `ptr_align.restrict_for_offset(b_offset)`,
313 // which `ptr.offset(b_offset)` cannot possibly fail to satisfy.
314 let (a, b) = (&a.value, &b.value);
315 let (a_size, b_size) = (a.size(self), b.size(self));
317 let b_offset = a_size.align_to(b.align(self).abi);
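// (Illustrative: for a `&[u8]` wide pointer on a 64-bit target both halves are
// pointer-sized, so `a_size` is 8 bytes and `b_offset` likewise ends up at 8.)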
318 assert!(b_offset.bytes() > 0); // we later use the offset to tell the fields apart
319 let b_ptr = ptr.offset(b_offset, self)?;
320 let a_val = self.memory.get_raw(ptr.alloc_id)?.read_scalar(self, a_ptr, a_size)?;
321 let b_val = self.memory.get_raw(ptr.alloc_id)?.read_scalar(self, b_ptr, b_size)?;
322 Ok(Some(ImmTy { imm: Immediate::ScalarPair(a_val, b_val), layout: mplace.layout }))
328 /// Try returning an immediate for the operand.
329 /// If the layout does not permit loading this as an immediate, return where in memory
330 /// we can find the data.
331 /// Note that for a given layout, this operation will either always fail or always
332 /// succeed! Whether it succeeds depends on whether the layout can be represented
333 /// in an `Immediate`, not on what data is currently stored there.
334 pub(crate) fn try_read_immediate(
336 src: OpTy<'tcx, M::PointerTag>,
337 ) -> InterpResult<'tcx, Result<ImmTy<'tcx, M::PointerTag>, MPlaceTy<'tcx, M::PointerTag>>> {
338 Ok(match src.try_as_mplace() {
340 if let Some(val) = self.try_read_immediate_from_mplace(mplace)? {
350 /// Read an immediate from a place, asserting that that is possible with the given layout.
352 pub fn read_immediate(
354 op: OpTy<'tcx, M::PointerTag>,
355 ) -> InterpResult<'tcx, ImmTy<'tcx, M::PointerTag>> {
356 if let Ok(imm) = self.try_read_immediate(op)? {
359 bug!("primitive read failed for type: {:?}", op.layout.ty);
363 /// Read a scalar from a place
366 op: OpTy<'tcx, M::PointerTag>,
367 ) -> InterpResult<'tcx, ScalarMaybeUndef<M::PointerTag>> {
368 Ok(self.read_immediate(op)?.to_scalar_or_undef())
371 // Turn the wide MPlace into a string (must already be dereferenced!)
372 pub fn read_str(&self, mplace: MPlaceTy<'tcx, M::PointerTag>) -> InterpResult<'tcx, &str> {
373 let len = mplace.len(self)?;
374 let bytes = self.memory.read_bytes(mplace.ptr, Size::from_bytes(len as u64))?;
375 let str = ::std::str::from_utf8(bytes)
376 .map_err(|err| err_unsup!(ValidationFailure(err.to_string())))?;
380 /// Projection functions
381 pub fn operand_field(
383 op: OpTy<'tcx, M::PointerTag>,
385 ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
386 let base = match op.try_as_mplace() {
389 let field = self.mplace_field(mplace, field)?;
390 return Ok(field.into());
395 let field = field.try_into().unwrap();
396 let field_layout = op.layout.field(self, field)?;
397 if field_layout.is_zst() {
398 let immediate = Scalar::zst().into();
399 return Ok(OpTy { op: Operand::Immediate(immediate), layout: field_layout });
401 let offset = op.layout.fields.offset(field);
402 let immediate = match *base {
403 // the field covers the entire type
404 _ if offset.bytes() == 0 && field_layout.size == op.layout.size => *base,
405 // extract fields from types with `ScalarPair` ABI
406 Immediate::ScalarPair(a, b) => {
407 let val = if offset.bytes() == 0 { a } else { b };
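// (Illustrative: for a slice reference built by `Immediate::new_slice`, field 0 at
// offset 0 is the data pointer `a` and field 1 is the length `b`.)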
410 Immediate::Scalar(val) => {
411 bug!("field access on non aggregate {:#?}, {:#?}", val, op.layout)
414 Ok(OpTy { op: Operand::Immediate(immediate), layout: field_layout })
417 pub fn operand_downcast(
419 op: OpTy<'tcx, M::PointerTag>,
421 ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
422 // Downcasts only change the layout
423 Ok(match op.try_as_mplace() {
424 Ok(mplace) => self.mplace_downcast(mplace, variant)?.into(),
426 let layout = op.layout.for_variant(self, variant);
427 OpTy { layout, ..op }
432 pub fn operand_projection(
434 base: OpTy<'tcx, M::PointerTag>,
435 proj_elem: &mir::PlaceElem<'tcx>,
436 ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
437 use rustc::mir::ProjectionElem::*;
438 Ok(match *proj_elem {
439 Field(field, _) => self.operand_field(base, field.index() as u64)?,
440 Downcast(_, variant) => self.operand_downcast(base, variant)?,
441 Deref => self.deref_operand(base)?.into(),
442 ConstantIndex { .. } | Index(_) if base.layout.is_zst() => {
444 op: Operand::Immediate(Scalar::zst().into()),
445 // the actual index doesn't matter, so we just pick a convenient one like 0
446 layout: base.layout.field(self, 0)?,
449 Subslice { from, to, from_end } if base.layout.is_zst() => {
450 let elem_ty = if let ty::Array(elem_ty, _) = base.layout.ty.kind {
453 bug!("slices shouldn't be zero-sized");
455 assert!(!from_end, "arrays shouldn't be subsliced from the end");
458 op: Operand::Immediate(Scalar::zst().into()),
459 layout: self.layout_of(self.tcx.mk_array(elem_ty, (to - from) as u64))?,
462 Subslice { .. } | ConstantIndex { .. } | Index(_) => {
463 // The rest should only occur as an mplace; we do not use Immediates for types
464 // that allow such operations. This matches place_projection forcing an allocation.
465 let mplace = base.assert_mem_place();
466 self.mplace_projection(mplace, proj_elem)?.into()
471 /// This is used by [priroda](https://github.com/oli-obk/priroda) to get an OpTy from a local
474 frame: &super::Frame<'mir, 'tcx, M::PointerTag, M::FrameExtra>,
476 layout: Option<TyLayout<'tcx>>,
477 ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
478 assert_ne!(local, mir::RETURN_PLACE);
479 let layout = self.layout_of_local(frame, local, layout)?;
480 let op = if layout.is_zst() {
481 // Do not read from ZSTs; they might not be initialized
482 Operand::Immediate(Scalar::zst().into())
484 M::access_local(&self, frame, local)?
486 Ok(OpTy { op, layout })
489 /// Every place can be read from, so we can turn it into an operand
493 place: PlaceTy<'tcx, M::PointerTag>,
494 ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
495 let op = match *place {
496 Place::Ptr(mplace) => Operand::Indirect(mplace),
497 Place::Local { frame, local } => *self.access_local(&self.stack[frame], local, None)?,
499 Ok(OpTy { op, layout: place.layout })
502 // Evaluate a place with the goal of reading from it. This lets us sometimes
503 // avoid allocations.
504 pub fn eval_place_to_op(
506 place: &mir::Place<'tcx>,
507 layout: Option<TyLayout<'tcx>>,
508 ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
509 use rustc::mir::PlaceBase;
511 let base_op = match &place.base {
512 PlaceBase::Local(mir::RETURN_PLACE) => throw_unsup!(ReadFromReturnPointer),
513 PlaceBase::Local(local) => {
514 // Do not use the layout passed in as argument if the base we are looking at
515 // here is not the entire place.
516 // FIXME: use place_projection.is_empty() when it is available
517 let layout = if place.projection.is_empty() { layout } else { None };
519 self.access_local(self.frame(), *local, layout)?
521 PlaceBase::Static(place_static) => self.eval_static_to_mplace(&place_static)?.into(),
527 .try_fold(base_op, |op, elem| self.operand_projection(op, elem))?;
529 trace!("eval_place_to_op: got {:?}", *op);
533 /// Evaluate the operand, returning a place where you can then find the data.
534 /// If you already know the layout, you can save two table lookups
535 /// by passing it in here.
538 mir_op: &mir::Operand<'tcx>,
539 layout: Option<TyLayout<'tcx>>,
540 ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
541 use rustc::mir::Operand::*;
542 let op = match *mir_op {
543 // FIXME: do some more logic on `move` to invalidate the old location
544 Copy(ref place) | Move(ref place) => self.eval_place_to_op(place, layout)?,
546 Constant(ref constant) => {
547 let val = self.subst_from_frame_and_normalize_erasing_regions(constant.literal);
548 self.eval_const_to_op(val, layout)?
551 trace!("{:?}: {:?}", mir_op, *op);
555 /// Evaluate a bunch of operands at once
556 pub(super) fn eval_operands(
558 ops: &[mir::Operand<'tcx>],
559 ) -> InterpResult<'tcx, Vec<OpTy<'tcx, M::PointerTag>>> {
560 ops.into_iter().map(|op| self.eval_operand(op, None)).collect()
563 // Used when the miri engine runs into a constant, and for extracting information from constants
564 // in patterns via the `const_eval` module.
565 /// The `val` and `layout` are assumed to already be in our interpreter
566 /// "universe" (param_env).
567 crate fn eval_const_to_op(
569 val: &'tcx ty::Const<'tcx>,
570 layout: Option<TyLayout<'tcx>>,
571 ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
572 let tag_scalar = |scalar| match scalar {
573 Scalar::Ptr(ptr) => Scalar::Ptr(self.tag_static_base_pointer(ptr)),
574 Scalar::Raw { data, size } => Scalar::Raw { data, size },
576 // Early-return cases.
577 let val_val = match val.val {
578 ty::ConstKind::Param(_) => throw_inval!(TooGeneric),
579 ty::ConstKind::Unevaluated(def_id, substs) => {
580 let instance = self.resolve(def_id, substs)?;
581 // For statics we pick `ParamEnv::reveal_all`, because statics don't have generics
582 // and thus don't care about the parameter environment. While we could just use
583 // `self.param_env`, that would mean we invoke the query to evaluate the static
584 // with different parameter environments, thus causing the static to be evaluated
586 let param_env = if self.tcx.is_static(def_id) {
587 ty::ParamEnv::reveal_all()
591 // We use `const_eval` here and `const_eval_raw` elsewhere in mir interpretation.
592 // The reason we use `const_eval_raw` everywhere else is to prevent cycles during
593 // validation, because validation automatically reads through any references, thus
594 // potentially requiring the current static to be evaluated again. This is not a
595 // problem here, because we need an operand and operands are always reads.
596 // FIXME(oli-obk): eliminate all the `const_eval_raw` usages when we get rid of
597 // `StaticKind` once and for all.
599 self.tcx.const_eval(param_env.and(GlobalId { instance, promoted: None }))?;
600 // "recurse". This is only ever going into a recusion depth of 1, because after
601 // `const_eval` we don't have `Unevaluated` anymore.
602 return self.eval_const_to_op(val, layout);
604 ty::ConstKind::Value(val_val) => val_val,
606 // Other cases need layout.
607 let layout = from_known_layout(layout, || self.layout_of(val.ty))?;
608 let op = match val_val {
609 ConstValue::ByRef { alloc, offset } => {
610 let id = self.tcx.alloc_map.lock().create_memory_alloc(alloc);
611 // We rely on mutability being set correctly in that allocation to prevent writes
612 // where none should happen.
613 let ptr = self.tag_static_base_pointer(Pointer::new(id, offset));
614 Operand::Indirect(MemPlace::from_ptr(ptr, layout.align.abi))
616 ConstValue::Scalar(x) => Operand::Immediate(tag_scalar(x).into()),
617 ConstValue::Slice { data, start, end } => {
618 // We rely on mutability being set correctly in `data` to prevent writes
619 // where none should happen.
620 let ptr = Pointer::new(
621 self.tcx.alloc_map.lock().create_memory_alloc(data),
622 Size::from_bytes(start as u64), // offset: `start`
624 Operand::Immediate(Immediate::new_slice(
625 self.tag_static_base_pointer(ptr).into(),
626 (end - start) as u64, // len: `end - start`
631 Ok(OpTy { op, layout })
634 /// Read discriminant, return the runtime value as well as the variant index.
635 pub fn read_discriminant(
637 rval: OpTy<'tcx, M::PointerTag>,
638 ) -> InterpResult<'tcx, (u128, VariantIdx)> {
639 trace!("read_discriminant_value {:#?}", rval.layout);
641 let (discr_layout, discr_kind, discr_index) = match rval.layout.variants {
642 layout::Variants::Single { index } => {
646 .discriminant_for_variant(*self.tcx, index)
647 .map_or(index.as_u32() as u128, |discr| discr.val);
648 return Ok((discr_val, index));
650 layout::Variants::Multiple {
651 discr: ref discr_layout,
655 } => (discr_layout, discr_kind, discr_index),
658 // read raw discriminant value
659 let discr_op = self.operand_field(rval, discr_index as u64)?;
660 let discr_val = self.read_immediate(discr_op)?;
661 let raw_discr = discr_val.to_scalar_or_undef();
662 trace!("discr value: {:?}", raw_discr);
664 Ok(match *discr_kind {
665 layout::DiscriminantKind::Tag => {
666 let bits_discr = raw_discr
668 .and_then(|raw_discr| self.force_bits(raw_discr, discr_val.layout.size))
669 .map_err(|_| err_ub!(InvalidDiscriminant(raw_discr.erase_tag())))?;
670 let real_discr = if discr_val.layout.ty.is_signed() {
671 // going from layout tag type to typeck discriminant type
672 // requires first sign extending with the discriminant layout
673 let sexted = sign_extend(bits_discr, discr_val.layout.size) as i128;
674 // and then zeroing the bits above the size of the typeck discriminant type
679 .expect("tagged layout corresponds to adt")
682 let size = layout::Integer::from_attr(self, discr_ty).size();
683 let truncatee = sexted as u128;
684 truncate(truncatee, size)
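// (Illustrative: an `i8` tag holding `0xff` sign-extends to `-1_i128`; truncating
// that to a hypothetical 32-bit typeck discriminant size yields `0xffff_ffff`.)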
688 // Make sure we catch invalid discriminants
689 let index = match rval.layout.ty.kind {
691 adt.discriminants(self.tcx.tcx).find(|(_, var)| var.val == real_discr)
693 ty::Generator(def_id, substs, _) => {
694 let substs = substs.as_generator();
696 .discriminants(def_id, self.tcx.tcx)
697 .find(|(_, var)| var.val == real_discr)
699 _ => bug!("tagged layout for non-adt non-generator"),
701 .ok_or_else(|| err_ub!(InvalidDiscriminant(raw_discr.erase_tag())))?;
702 (real_discr, index.0)
704 layout::DiscriminantKind::Niche {
709 let variants_start = niche_variants.start().as_u32();
710 let variants_end = niche_variants.end().as_u32();
711 let raw_discr = raw_discr
713 .map_err(|_| err_ub!(InvalidDiscriminant(ScalarMaybeUndef::Undef)))?;
714 match raw_discr.to_bits_or_ptr(discr_val.layout.size, self) {
716 // The niche must be just 0 (which an inbounds pointer value never is)
717 let ptr_valid = niche_start == 0
718 && variants_start == variants_end
719 && !self.memory.ptr_may_be_null(ptr);
721 throw_ub!(InvalidDiscriminant(raw_discr.erase_tag().into()))
723 (dataful_variant.as_u32() as u128, dataful_variant)
726 // We need to use machine arithmetic to get the relative variant idx:
727 // variant_index_relative = discr_val - niche_start_val
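// (Illustrative: with `niche_start = 2` and a raw tag value of 3, the subtraction
// below gives `variant_index_relative = 1`, so, provided it passes the range check
// further down, the decoded variant is `niche_variants.start() + 1`.)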
729 self.layout_of(discr_layout.value.to_int_ty(*self.tcx))?;
730 let discr_val = ImmTy::from_uint(raw_discr, discr_layout);
731 let niche_start_val = ImmTy::from_uint(niche_start, discr_layout);
732 let variant_index_relative_val =
733 self.binary_op(mir::BinOp::Sub, discr_val, niche_start_val)?;
734 let variant_index_relative = variant_index_relative_val
736 .assert_bits(discr_val.layout.size);
737 // Check if this is in the range that indicates an actual discriminant.
738 if variant_index_relative <= u128::from(variants_end - variants_start) {
739 let variant_index_relative = u32::try_from(variant_index_relative)
740 .expect("we checked that this fits into a u32");
741 // Then computing the absolute variant idx should not overflow any more.
742 let variant_index = variants_start
743 .checked_add(variant_index_relative)
744 .expect("oveflow computing absolute variant idx");
746 (variant_index as usize)
751 .expect("tagged layout for non adt")
755 (u128::from(variant_index), VariantIdx::from_u32(variant_index))
757 (u128::from(dataful_variant.as_u32()), dataful_variant)