//! The virtual memory representation of the MIR interpreter.
+use std::borrow::Cow;
+use std::convert::TryFrom;
+use std::iter;
+use std::ops::{Add, Deref, DerefMut, Mul, Range, Sub};
+
+use rustc_ast::ast::Mutability;
+use rustc_data_structures::sorted_map::SortedMap;
+use rustc_target::abi::HasDataLayout;
+
use super::{
read_target_uint, write_target_uint, AllocId, InterpResult, Pointer, Scalar, ScalarMaybeUndef,
};
use crate::ty::layout::{Align, Size};
-use rustc_ast::ast::Mutability;
-use rustc_data_structures::sorted_map::SortedMap;
-use rustc_target::abi::HasDataLayout;
-use std::borrow::Cow;
-use std::iter;
-use std::ops::{Deref, DerefMut, Range};
-
// NOTE: When adding new fields, make sure to adjust the `Snapshot` impl in
// `src/librustc_mir/interpret/snapshot.rs`.
#[derive(Clone, Debug, Eq, PartialEq, PartialOrd, Ord, Hash, RustcEncodable, RustcDecodable)]
/// Creates a read-only allocation initialized by the given bytes
pub fn from_bytes<'a>(slice: impl Into<Cow<'a, [u8]>>, align: Align) -> Self {
let bytes = slice.into().into_owned();
- let size = Size::from_bytes(bytes.len() as u64);
+ let size = Size::from_bytes(u64::try_from(bytes.len()).unwrap());
Self {
bytes,
relocations: Relocations::new(),
}
pub fn undef(size: Size, align: Align) -> Self {
- assert_eq!(size.bytes() as usize as u64, size.bytes());
Allocation {
- bytes: vec![0; size.bytes() as usize],
+ bytes: vec![0; usize::try_from(size.bytes()).unwrap()],
relocations: Relocations::new(),
undef_mask: UndefMask::new(size, false),
size,
/// Raw accessors. Provide access to otherwise private bytes.
impl<Tag, Extra> Allocation<Tag, Extra> {
pub fn len(&self) -> usize {
- self.size.bytes() as usize
+ usize::try_from(self.size.bytes()).unwrap()
}
/// Looks at a slice which may describe undefined bytes or describe a relocation. This differs
/// Returns the range of this allocation that was meant.
#[inline]
fn check_bounds(&self, offset: Size, size: Size) -> Range<usize> {
- let end = offset + size; // This does overflow checking.
- assert_eq!(
- end.bytes() as usize as u64,
- end.bytes(),
- "cannot handle this access on this host architecture"
- );
- let end = end.bytes() as usize;
+ let end = Size::add(offset, size); // This does overflow checking.
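+ // E.g. a 64-bit target interpreted on a 32-bit host can produce sizes that
+ // do not fit in the host's `usize`; `try_from` turns that into a loud failure.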
+ let end = usize::try_from(end.bytes()).expect("access too big for this host architecture");
assert!(
end <= self.len(),
"Out-of-bounds access at offset {}, size {} in allocation of size {}",
size.bytes(),
self.len()
);
- (offset.bytes() as usize)..end
+ usize::try_from(offset.bytes()).unwrap()..end
}
/// The last argument controls whether we error out when there are undefined
cx: &impl HasDataLayout,
ptr: Pointer<Tag>,
) -> InterpResult<'tcx, &[u8]> {
- assert_eq!(ptr.offset.bytes() as usize as u64, ptr.offset.bytes());
- let offset = ptr.offset.bytes() as usize;
+ let offset = usize::try_from(ptr.offset.bytes()).unwrap();
Ok(match self.bytes[offset..].iter().position(|&c| c == 0) {
Some(size) => {
- let size_with_null = Size::from_bytes((size + 1) as u64);
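+ // `size` is the index of the null byte inside the allocation, so `size + 1`
+ // cannot actually overflow; the checked add merely documents that.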
+ let size_with_null =
+ Size::from_bytes(u64::try_from(size.checked_add(1).unwrap()).unwrap());
// Go through `get_bytes` for checks and AllocationExtra hooks.
// We read the null, so we include it in the request, but we want it removed
// from the result, so we do subslicing.
let (lower, upper) = src.size_hint();
let len = upper.expect("can only write bounded iterators");
assert_eq!(lower, len, "can only write iterators with a precise length");
- let bytes = self.get_bytes_mut(cx, ptr, Size::from_bytes(len as u64))?;
+ let bytes = self.get_bytes_mut(cx, ptr, Size::from_bytes(u64::try_from(len).unwrap()))?;
// `zip` would stop when the first iterator ends; we want to definitely
// cover all of `bytes`.
for dest in bytes {
} else {
match self.relocations.get(&ptr.offset) {
Some(&(tag, alloc_id)) => {
- let ptr = Pointer::new_with_tag(alloc_id, Size::from_bytes(bits as u64), tag);
+ let ptr = Pointer::new_with_tag(
+ alloc_id,
+ Size::from_bytes(u64::try_from(bits).unwrap()),
+ tag,
+ );
return Ok(ScalarMaybeUndef::Scalar(ptr.into()));
}
None => {}
};
let bytes = match val.to_bits_or_ptr(type_size, cx) {
- Err(val) => val.offset.bytes() as u128,
+ Err(val) => u128::from(val.offset.bytes()),
Ok(data) => data,
};
// We have to go back `pointer_size - 1` bytes, as that one would still overlap with
// the beginning of this range.
let start = ptr.offset.bytes().saturating_sub(cx.data_layout().pointer_size.bytes() - 1);
- let end = ptr.offset + size; // This does overflow checking.
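+ // `saturating_sub` avoids underflow when the access starts within the first
+ // `pointer_size - 1` bytes of the allocation.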
+ let end = Size::add(ptr.offset, size); // This does overflow checking.
self.relocations.range(Size::from_bytes(start)..end)
}
)
};
let start = ptr.offset;
- let end = start + size;
+ let end = Size::add(start, size);
// Mark parts of the outermost relocations as undefined if they partially fall outside the
// given range.
#[inline]
fn check_defined(&self, ptr: Pointer<Tag>, size: Size) -> InterpResult<'tcx> {
self.undef_mask
- .is_range_defined(ptr.offset, ptr.offset + size)
+ .is_range_defined(ptr.offset, Size::add(ptr.offset, size))
.or_else(|idx| throw_ub!(InvalidUndefBytes(Some(Pointer::new(ptr.alloc_id, idx)))))
}
if size.bytes() == 0 {
return;
}
- self.undef_mask.set_range(ptr.offset, ptr.offset + size, new_state);
+ self.undef_mask.set_range(ptr.offset, Size::add(ptr.offset, size), new_state);
}
}
for i in 1..size.bytes() {
// FIXME: optimize to bitshift the current undef block's bits and read the top bit.
- if self.undef_mask.get(src.offset + Size::from_bytes(i)) == cur {
+ if self.undef_mask.get(Size::add(src.offset, Size::from_bytes(i))) == cur {
cur_len += 1;
} else {
ranges.push(cur_len);
if defined.ranges.len() <= 1 {
self.undef_mask.set_range_inbounds(
dest.offset,
- dest.offset + size * repeat,
+ Size::add(dest.offset, Size::mul(size, repeat)),
defined.initial,
);
return;
for i in 0..length {
new_relocations.extend(relocations.iter().map(|&(offset, reloc)| {
// compute offset for current repetition
- let dest_offset = dest.offset + (i * size);
+ let dest_offset = Size::add(dest.offset, Size::mul(size, i));
(
// shift offsets from source allocation to destination allocation
- offset + dest_offset - src.offset,
+ Size::sub(Size::add(offset, dest_offset), src.offset),
reloc,
)
}));
if amount.bytes() == 0 {
return;
}
- let unused_trailing_bits = self.blocks.len() as u64 * Self::BLOCK_SIZE - self.len.bytes();
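+ // The mask's total capacity is `blocks.len() * BLOCK_SIZE` bits; bits beyond
+ // `self.len` are unused.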
+ let unused_trailing_bits =
+ u64::try_from(self.blocks.len()).unwrap() * Self::BLOCK_SIZE - self.len.bytes();
if amount.bytes() > unused_trailing_bits {
let additional_blocks = amount.bytes() / Self::BLOCK_SIZE + 1;
- assert_eq!(additional_blocks as usize as u64, additional_blocks);
self.blocks.extend(
// FIXME(oli-obk): optimize this by repeating `new_state as Block`.
- iter::repeat(0).take(additional_blocks as usize),
+ iter::repeat(0).take(usize::try_from(additional_blocks).unwrap()),
);
}
let start = self.len;
self.len += amount;
- self.set_range_inbounds(start, start + amount, new_state);
+ self.set_range_inbounds(start, Size::add(start, amount), new_state);
}
}
let bits = bits.bytes();
let a = bits / UndefMask::BLOCK_SIZE;
let b = bits % UndefMask::BLOCK_SIZE;
- assert_eq!(a as usize as u64, a);
- assert_eq!(b as usize as u64, b);
- (a as usize, b as usize)
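+ // `a` is the block index, `b` the bit index within that block.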
+ (usize::try_from(a).unwrap(), usize::try_from(b).unwrap())
}
mod queries;
mod value;
+use std::convert::TryFrom;
+use std::fmt;
+use std::io;
+use std::num::NonZeroU32;
+use std::sync::atomic::{AtomicU32, Ordering};
+
+use byteorder::{BigEndian, LittleEndian, ReadBytesExt, WriteBytesExt};
+use rustc_ast::ast::LitKind;
+use rustc_data_structures::fx::FxHashMap;
+use rustc_data_structures::sync::{HashMapExt, Lock};
+use rustc_data_structures::tiny_list::TinyList;
+use rustc_hir::def_id::DefId;
+use rustc_macros::HashStable;
+use rustc_serialize::{Decodable, Encodable, Encoder};
+
+use crate::mir;
+use crate::ty::codec::TyDecoder;
+use crate::ty::layout::{self, Size};
+use crate::ty::subst::GenericArgKind;
+use crate::ty::{self, Instance, Ty, TyCtxt};
+
pub use self::error::{
struct_error, ConstEvalErr, ConstEvalRawResult, ConstEvalResult, ErrorHandled, FrameInfo,
InterpError, InterpErrorInfo, InterpResult, InvalidProgramInfo, MachineStopType,
pub use self::pointer::{CheckInAllocMsg, Pointer, PointerArithmetic};
-use crate::mir;
-use crate::ty::codec::TyDecoder;
-use crate::ty::layout::{self, Size};
-use crate::ty::subst::GenericArgKind;
-use crate::ty::{self, Instance, Ty, TyCtxt};
-use byteorder::{BigEndian, LittleEndian, ReadBytesExt, WriteBytesExt};
-use rustc_ast::ast::LitKind;
-use rustc_data_structures::fx::FxHashMap;
-use rustc_data_structures::sync::{HashMapExt, Lock};
-use rustc_data_structures::tiny_list::TinyList;
-use rustc_hir::def_id::DefId;
-use rustc_macros::HashStable;
-use rustc_serialize::{Decodable, Encodable, Encoder};
-use std::fmt;
-use std::io;
-use std::num::NonZeroU32;
-use std::sync::atomic::{AtomicU32, Ordering};
-
/// Uniquely identifies one of the following:
/// - A constant
/// - A static
D: TyDecoder<'tcx>,
{
// Read the index of the allocation.
- let idx = decoder.read_u32()? as usize;
- let pos = self.state.data_offsets[idx] as usize;
+ let idx = usize::try_from(decoder.read_u32()?).unwrap();
+ let pos = usize::try_from(self.state.data_offsets[idx]).unwrap();
// Decode the `AllocDiscriminant` now so that we know if we have to reserve an
// `AllocId`.
/// This should be called by all the other methods before returning!
#[inline]
fn truncate_to_ptr(&self, (val, over): (u64, bool)) -> (u64, bool) {
- let val = val as u128;
+ let val = u128::from(val);
let max_ptr_plus_1 = 1u128 << self.pointer_size().bits();
- ((val % max_ptr_plus_1) as u64, over || val >= max_ptr_plus_1)
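+ // E.g. for a 32-bit target `max_ptr_plus_1` is `1 << 32`: the value is reduced
+ // modulo 2^32 and the flag records whether any wrapping occurred.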
+ (u64::try_from(val % max_ptr_plus_1).unwrap(), over || val >= max_ptr_plus_1)
}
#[inline]
+use std::convert::TryFrom;
+
use rustc_apfloat::{
ieee::{Double, Single},
Float,
#[inline(always)]
fn check_data(data: u128, size: u8) {
debug_assert_eq!(
- truncate(data, Size::from_bytes(size as u64)),
+ truncate(data, Size::from_bytes(u64::from(size))),
data,
"Scalar value {:#x} exceeds size of {} bytes",
data,
let dl = cx.data_layout();
match self {
Scalar::Raw { data, size } => {
- assert_eq!(size as u64, dl.pointer_size.bytes());
- Ok(Scalar::Raw { data: dl.offset(data as u64, i.bytes())? as u128, size })
+ assert_eq!(u64::from(size), dl.pointer_size.bytes());
+ Ok(Scalar::Raw {
+ data: u128::from(dl.offset(u64::try_from(data).unwrap(), i.bytes())?),
+ size,
+ })
}
Scalar::Ptr(ptr) => ptr.offset(i, dl).map(Scalar::Ptr),
}
let dl = cx.data_layout();
match self {
Scalar::Raw { data, size } => {
- assert_eq!(size as u64, dl.pointer_size.bytes());
- Scalar::Raw { data: dl.overflowing_offset(data as u64, i.bytes()).0 as u128, size }
+ assert_eq!(u64::from(size), dl.pointer_size.bytes());
+ Scalar::Raw {
+ data: u128::from(
+ dl.overflowing_offset(u64::try_from(data).unwrap(), i.bytes()).0,
+ ),
+ size,
+ }
}
Scalar::Ptr(ptr) => Scalar::Ptr(ptr.wrapping_offset(i, dl)),
}
let dl = cx.data_layout();
match self {
Scalar::Raw { data, size } => {
- assert_eq!(size as u64, dl.pointer_size().bytes());
- Ok(Scalar::Raw { data: dl.signed_offset(data as u64, i)? as u128, size })
+ assert_eq!(u64::from(size), dl.pointer_size.bytes());
+ Ok(Scalar::Raw {
+ data: u128::from(dl.signed_offset(u64::try_from(data).unwrap(), i)?),
+ size,
+ })
}
Scalar::Ptr(ptr) => ptr.signed_offset(i, dl).map(Scalar::Ptr),
}
let dl = cx.data_layout();
match self {
Scalar::Raw { data, size } => {
- assert_eq!(size as u64, dl.pointer_size.bytes());
+ assert_eq!(u64::from(size), dl.pointer_size.bytes());
Scalar::Raw {
- data: dl.overflowing_signed_offset(data as u64, i128::from(i)).0 as u128,
+ data: u128::from(
+ dl.overflowing_signed_offset(u64::try_from(data).unwrap(), i128::from(i)).0,
+ ),
size,
}
}
#[inline]
pub fn from_u8(i: u8) -> Self {
// Guaranteed to be truncated and does not need sign extension.
- Scalar::Raw { data: i as u128, size: 1 }
+ Scalar::Raw { data: i.into(), size: 1 }
}
#[inline]
pub fn from_u16(i: u16) -> Self {
// Guaranteed to be truncated and does not need sign extension.
- Scalar::Raw { data: i as u128, size: 2 }
+ Scalar::Raw { data: i.into(), size: 2 }
}
#[inline]
pub fn from_u32(i: u32) -> Self {
// Guaranteed to be truncated and does not need sign extension.
- Scalar::Raw { data: i as u128, size: 4 }
+ Scalar::Raw { data: i.into(), size: 4 }
}
#[inline]
pub fn from_u64(i: u64) -> Self {
// Guaranteed to be truncated and does not need sign extension.
- Scalar::Raw { data: i as u128, size: 8 }
+ Scalar::Raw { data: i.into(), size: 8 }
}
#[inline]
assert_ne!(target_size.bytes(), 0, "you should never look at the bits of a ZST");
match self {
Scalar::Raw { data, size } => {
- assert_eq!(target_size.bytes(), size as u64);
+ assert_eq!(target_size.bytes(), u64::from(size));
Scalar::check_data(data, size);
Ok(data)
}
assert_ne!(target_size.bytes(), 0, "you should never look at the bits of a ZST");
match self {
Scalar::Raw { data, size } => {
- assert_eq!(target_size.bytes(), size as u64);
+ assert_eq!(target_size.bytes(), u64::from(size));
Scalar::check_data(data, size);
Ok(data)
}
/// Converts the scalar to produce an `u8`. Fails if the scalar is a pointer.
pub fn to_u8(self) -> InterpResult<'static, u8> {
- self.to_unsigned_with_bit_width(8).map(|v| v as u8)
+ self.to_unsigned_with_bit_width(8).map(|v| u8::try_from(v).unwrap())
}
/// Converts the scalar to produce an `u16`. Fails if the scalar is a pointer.
pub fn to_u16(self) -> InterpResult<'static, u16> {
- self.to_unsigned_with_bit_width(16).map(|v| v as u16)
+ self.to_unsigned_with_bit_width(16).map(|v| u16::try_from(v).unwrap())
}
/// Converts the scalar to produce an `u32`. Fails if the scalar is a pointer.
pub fn to_u32(self) -> InterpResult<'static, u32> {
- self.to_unsigned_with_bit_width(32).map(|v| v as u32)
+ self.to_unsigned_with_bit_width(32).map(|v| u32::try_from(v).unwrap())
}
/// Converts the scalar to produce an `u64`. Fails if the scalar is a pointer.
pub fn to_u64(self) -> InterpResult<'static, u64> {
- self.to_unsigned_with_bit_width(64).map(|v| v as u64)
+ self.to_unsigned_with_bit_width(64).map(|v| u64::try_from(v).unwrap())
}
pub fn to_machine_usize(self, cx: &impl HasDataLayout) -> InterpResult<'static, u64> {
let b = self.to_bits(cx.data_layout().pointer_size)?;
- Ok(b as u64)
+ Ok(u64::try_from(b).unwrap())
}
#[inline]
/// Converts the scalar to produce an `i8`. Fails if the scalar is a pointer.
pub fn to_i8(self) -> InterpResult<'static, i8> {
- self.to_signed_with_bit_width(8).map(|v| v as i8)
+ self.to_signed_with_bit_width(8).map(|v| i8::try_from(v).unwrap())
}
/// Converts the scalar to produce an `i16`. Fails if the scalar is a pointer.
pub fn to_i16(self) -> InterpResult<'static, i16> {
- self.to_signed_with_bit_width(16).map(|v| v as i16)
+ self.to_signed_with_bit_width(16).map(|v| i16::try_from(v).unwrap())
}
/// Converts the scalar to produce an `i32`. Fails if the scalar is a pointer.
pub fn to_i32(self) -> InterpResult<'static, i32> {
- self.to_signed_with_bit_width(32).map(|v| v as i32)
+ self.to_signed_with_bit_width(32).map(|v| i32::try_from(v).unwrap())
}
/// Converts the scalar to produce an `i64`. Fails if the scalar is a pointer.
pub fn to_i64(self) -> InterpResult<'static, i64> {
- self.to_signed_with_bit_width(64).map(|v| v as i64)
+ self.to_signed_with_bit_width(64).map(|v| i64::try_from(v).unwrap())
}
pub fn to_machine_isize(self, cx: &impl HasDataLayout) -> InterpResult<'static, i64> {
let sz = cx.data_layout().pointer_size;
let b = self.to_bits(sz)?;
let b = sign_extend(b, sz) as i128;
- Ok(b as i64)
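+ // `b` was sign-extended from at most 64 bits, so the conversion cannot fail.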
+ Ok(i64::try_from(b).unwrap())
}
#[inline]
pub fn to_f32(self) -> InterpResult<'static, Single> {
// Going through `u32` to check size and truncation.
- Ok(Single::from_bits(self.to_u32()? as u128))
+ Ok(Single::from_bits(self.to_u32()?.into()))
}
#[inline]
pub fn to_f64(self) -> InterpResult<'static, Double> {
// Going through `u64` to check size and truncation.
- Ok(Double::from_bits(self.to_u64()? as u128))
+ Ok(Double::from_bits(self.to_u64()?.into()))
}
}
data.get_bytes(
cx,
// invent a pointer, only the offset is relevant anyway
- Pointer::new(AllocId(0), Size::from_bytes(start as u64)),
- Size::from_bytes(len as u64),
+ Pointer::new(AllocId(0), Size::from_bytes(u64::try_from(start).unwrap())),
+ Size::from_bytes(u64::try_from(len).unwrap()),
)
.unwrap_or_else(|err| bug!("const slice is invalid: {:?}", err))
} else {
+use std::convert::TryFrom;
+
use rustc::ty::adjustment::PointerCast;
use rustc::ty::layout::{self, Size, TyLayout};
use rustc::ty::{self, Ty, TypeAndMut, TypeFoldable};
Char => {
// `u8` to `char` cast
- assert_eq!(v as u8 as u128, v);
- Ok(Scalar::from_uint(v, Size::from_bytes(4)))
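+ // `v` originates from a `u8`, so the conversion only guards against
+ // interpreter bugs.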
+ Ok(Scalar::from_uint(u8::try_from(v).unwrap(), Size::from_bytes(4)))
}
// Casts to bool are not permitted by rustc, no need to handle them here.
match dest_ty.kind {
// float -> uint
Uint(t) => {
+ // FIXME: can we make `bit_width` return a type more compatible with `Size::bits`?
let width = t.bit_width().unwrap_or_else(|| self.pointer_size().bits() as usize);
let v = f.to_u128(width).value;
// This should already fit the bit width
use std::cell::Cell;
use std::fmt::Write;
use std::mem;
+use std::ops::Add;
use rustc::ich::StableHashingContext;
use rustc::mir;
// and it also rounds up to alignment, which we want to avoid,
// as the unsized field's alignment could be smaller.
assert!(!layout.ty.is_simd());
+ assert!(layout.fields.count() > 0);
trace!("DST layout: {:?}", layout);
let sized_size = layout.fields.offset(layout.fields.count() - 1);
// here. But this is where the add would go.)
// Return the sum of sizes and max of aligns.
- let size = sized_size + unsized_size;
+ let size = Size::add(sized_size, unsized_size);
// Choose max of two known alignments (combined value must
// be aligned according to more restrictive of the two).
Primitive::Int(integer, _) => integer.size(),
_ => bug!("invalid `{}` argument: {:?}", name, bits),
};
- let extra = 128 - size.bits() as u128;
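+ // The value occupies only the low `size.bits()` bits of the `u128`;
+ // `extra` counts the unused high bits.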
+ let extra = 128 - u128::from(size.bits());
let bits_out = match name {
- sym::ctpop => bits.count_ones() as u128,
- sym::ctlz => bits.leading_zeros() as u128 - extra,
- sym::cttz => (bits << extra).trailing_zeros() as u128 - extra,
+ sym::ctpop => u128::from(bits.count_ones()),
+ sym::ctlz => u128::from(bits.leading_zeros()) - extra,
+ sym::cttz => u128::from((bits << extra).trailing_zeros()) - extra,
sym::bswap => (bits << extra).swap_bytes(),
sym::bitreverse => (bits << extra).reverse_bits(),
_ => bug!("not a numeric intrinsic: {}", name),
let val_bits = self.force_bits(val, layout.size)?;
let raw_shift = self.read_scalar(args[1])?.not_undef()?;
let raw_shift_bits = self.force_bits(raw_shift, layout.size)?;
- let width_bits = layout.size.bits() as u128;
+ let width_bits = u128::from(layout.size.bits());
let shift_bits = raw_shift_bits % width_bits;
let inv_shift_bits = (width_bits - shift_bits) % width_bits;
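+ // Reducing the shift amount modulo the width makes rotating by the full width
+ // a no-op instead of an out-of-range shift.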
let result_bits = if intrinsic_name == sym::rotate_left {
+use std::convert::TryFrom;
+
use rustc::middle::lang_items::PanicLocationLangItem;
use rustc::ty::subst::Subst;
use rustc_span::{Span, Symbol};
let caller = self.tcx.sess.source_map().lookup_char_pos(topmost.lo());
(
Symbol::intern(&caller.file.name.to_string()),
- caller.line as u32,
- caller.col_display as u32 + 1,
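+ // `caller.line` is already 1-based, but `col_display` is 0-based, hence the `+ 1`.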
+ u32::try_from(caller.line).unwrap(),
+ u32::try_from(caller.col_display).unwrap().checked_add(1).unwrap(),
)
}
use std::borrow::Cow;
use std::collections::VecDeque;
+use std::convert::TryFrom;
+use std::ops::{Add, Mul};
use std::ptr;
use rustc::ty::layout::{Align, HasDataLayout, Size, TargetDataLayout};
};
Ok(match normalized.to_bits_or_ptr(self.pointer_size(), self) {
Ok(bits) => {
- let bits = bits as u64; // it's ptr-sized
+ let bits = u64::try_from(bits).unwrap(); // it's ptr-sized
assert!(size.bytes() == 0);
// Must be non-NULL.
if bits == 0 {
}
if alloc.undef_mask().is_range_defined(i, i + Size::from_bytes(1)).is_ok() {
- // this `as usize` is fine, since `i` came from a `usize`
+ // this conversion cannot fail, since `i` came from a `usize`
- let i = i.bytes() as usize;
+ let i = usize::try_from(i.bytes()).unwrap();
// Checked definedness (and thus range) and relocations. This access also doesn't
// influence interpreter execution but is only for debugging.
) -> InterpResult<'tcx> {
let src = src.into_iter();
- let size = Size::from_bytes(src.size_hint().0 as u64);
- // `write_bytes` checks that this lower bound matches the upper bound matches reality.
+ let size = Size::from_bytes(u64::try_from(src.size_hint().0).unwrap());
+ // `write_bytes` checks that this lower bound `size` matches the upper bound and the
+ // actual number of bytes written.
let ptr = match self.check_ptr_access(ptr, size, Align::from_bytes(1).unwrap())? {
Some(ptr) => ptr,
None => return Ok(()), // zero-sized access
let tcx = self.tcx.tcx;
- // The bits have to be saved locally before writing to dest in case src and dest overlap.
- assert_eq!(size.bytes() as usize as u64, size.bytes());
-
// This checks relocation edges on the src.
let src_bytes =
self.get_raw(src.alloc_id)?.get_bytes_with_undef_and_ptr(&tcx, src, size)?.as_ptr();
let dest_bytes =
- self.get_raw_mut(dest.alloc_id)?.get_bytes_mut(&tcx, dest, size * length)?;
+ self.get_raw_mut(dest.alloc_id)?.get_bytes_mut(&tcx, dest, Size::mul(size, length))?;
// If `dest_bytes` is empty we just optimize to not run anything for zsts.
// See #67539
// touched if the bytes stay undef for the whole interpreter execution. On contemporary
- // operating system this can avoid physically allocating the page.
+ // operating systems this can avoid physically allocating the page.
let dest_alloc = self.get_raw_mut(dest.alloc_id)?;
- dest_alloc.mark_definedness(dest, size * length, false);
+ dest_alloc.mark_definedness(dest, Size::mul(size, length), false);
dest_alloc.mark_relocation_range(relocations);
return Ok(());
}
// The pointers above remain valid even if the `HashMap` table is moved around because they
// point into the `Vec` storing the bytes.
unsafe {
- assert_eq!(size.bytes() as usize as u64, size.bytes());
if src.alloc_id == dest.alloc_id {
if nonoverlapping {
- if (src.offset <= dest.offset && src.offset + size > dest.offset)
- || (dest.offset <= src.offset && dest.offset + size > src.offset)
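+ // The two ranges overlap iff whichever starts first extends past the start
+ // of the other.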
+ if (src.offset <= dest.offset && Size::add(src.offset, size) > dest.offset)
+ || (dest.offset <= src.offset && Size::add(dest.offset, size) > src.offset)
{
throw_ub_format!("copy_nonoverlapping called on overlapping ranges")
}
for i in 0..length {
ptr::copy(
src_bytes,
- dest_bytes.offset((size.bytes() * i) as isize),
- size.bytes() as usize,
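+ // Raw-pointer `offset` takes an `isize`, so the byte offset must be checked
+ // into that range.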
+ dest_bytes.offset(isize::try_from(Size::mul(size, i).bytes()).unwrap()),
+ usize::try_from(size.bytes()).unwrap(),
);
}
} else {
for i in 0..length {
ptr::copy_nonoverlapping(
src_bytes,
- dest_bytes.offset((size.bytes() * i) as isize),
- size.bytes() as usize,
+ dest_bytes.offset(isize::try_from(Size::mul(size, i).bytes()).unwrap()),
+ usize::try_from(size.bytes()).unwrap(),
);
}
}
) -> InterpResult<'tcx, u128> {
match scalar.to_bits_or_ptr(size, self) {
Ok(bits) => Ok(bits),
- Err(ptr) => Ok(M::ptr_to_int(&self, ptr)? as u128),
+ Err(ptr) => Ok(M::ptr_to_int(&self, ptr)?.into()),
}
}
}
// Turn the wide MPlace into a string (must already be dereferenced!)
pub fn read_str(&self, mplace: MPlaceTy<'tcx, M::PointerTag>) -> InterpResult<'tcx, &str> {
let len = mplace.len(self)?;
- let bytes = self.memory.read_bytes(mplace.ptr, Size::from_bytes(len as u64))?;
+ let bytes = self.memory.read_bytes(mplace.ptr, Size::from_bytes(u64::from(len)))?;
let str = ::std::str::from_utf8(bytes)
.map_err(|err| err_ub_format!("this string is not valid UTF-8: {}", err))?;
Ok(str)
) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
use rustc::mir::ProjectionElem::*;
Ok(match *proj_elem {
- Field(field, _) => self.operand_field(base, field.index() as u64)?,
+ Field(field, _) => self.operand_field(base, u64::try_from(field.index()).unwrap())?,
Downcast(_, variant) => self.operand_downcast(base, variant)?,
Deref => self.deref_operand(base)?.into(),
Subslice { .. } | ConstantIndex { .. } | Index(_) => {
// where none should happen.
let ptr = Pointer::new(
self.tcx.alloc_map.lock().create_memory_alloc(data),
- Size::from_bytes(start as u64), // offset: `start`
+ Size::from_bytes(u64::try_from(start).unwrap()), // offset: `start`
);
Operand::Immediate(Immediate::new_slice(
self.tag_global_base_pointer(ptr).into(),
- (end - start) as u64, // len: `end - start`
+ u64::try_from(end.checked_sub(start).unwrap()).unwrap(), // len: `end - start`
self,
))
}
.layout
.ty
.discriminant_for_variant(*self.tcx, index)
- .map_or(index.as_u32() as u128, |discr| discr.val);
+ .map_or(u128::from(index.as_u32()), |discr| discr.val);
return Ok((discr_val, index));
}
layout::Variants::Multiple {
};
// read raw discriminant value
- let discr_op = self.operand_field(rval, discr_index as u64)?;
+ let discr_op = self.operand_field(rval, u64::try_from(discr_index).unwrap())?;
let discr_val = self.read_immediate(discr_op)?;
let raw_discr = discr_val.to_scalar_or_undef();
trace!("discr value: {:?}", raw_discr);
if !ptr_valid {
throw_ub!(InvalidDiscriminant(raw_discr.erase_tag().into()))
}
- (dataful_variant.as_u32() as u128, dataful_variant)
+ (u128::from(dataful_variant.as_u32()), dataful_variant)
}
Ok(raw_discr) => {
// We need to use machine arithmetic to get the relative variant idx:
.expect("tagged layout for non adt")
.variants
.len();
- assert!((variant_index as usize) < variants_len);
+ assert!(usize::try_from(variant_index).unwrap() < variants_len);
(u128::from(variant_index), VariantIdx::from_u32(variant_index))
} else {
(u128::from(dataful_variant.as_u32()), dataful_variant)
use std::convert::TryFrom;
use std::hash::Hash;
+use std::ops::Mul;
use rustc::mir;
use rustc::mir::interpret::truncate;
// This can only be reached in ConstProp and non-rustc-MIR.
throw_ub!(BoundsCheckFailed { len, index: field });
}
- stride * field
+ Size::mul(stride, field) // `Size` multiplication is checked
}
layout::FieldPlacement::Union(count) => {
assert!(
- field < count as u64,
+ field < u64::try_from(count).unwrap(),
"Tried to access field {} of union {:#?} with {} fields",
field,
base.layout,
}
};
- // the only way conversion can fail if is this is an array (otherwise we already panicked
- // above). In that case, all fields are equal.
+ // the only way conversion can fail is if this is an array (otherwise we already panicked
+ // above). In that case, all fields have the same layout.
let field_layout = base.layout.field(self, usize::try_from(field).unwrap_or(0))?;
// Offset may need adjustment for unsized fields.
};
let layout = base.layout.field(self, 0)?;
let dl = &self.tcx.data_layout;
- Ok((0..len).map(move |i| base.offset(i * stride, MemPlaceMeta::None, layout, dl)))
+ Ok((0..len)
+     .map(move |i| base.offset(Size::mul(stride, i), MemPlaceMeta::None, layout, dl)))
}
fn mplace_subslice(
) -> InterpResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> {
let len = base.len(self)?; // also asserts that we have a type where this makes sense
let actual_to = if from_end {
- if from + to > len {
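+ // With `from_end`, `to` counts from the back: the subslice is `[from, len - to)`,
+ // so we need `from + to <= len`.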
+ if from.checked_add(to).map_or(true, |to| to > len) {
// This can only be reached in ConstProp and non-rustc-MIR.
- throw_ub!(BoundsCheckFailed { len: len as u64, index: from as u64 + to as u64 });
+ throw_ub!(BoundsCheckFailed { len, index: from.saturating_add(to) });
}
- len - to
+ len.checked_sub(to).unwrap()
} else {
to
};
// Not using layout method because that works with usize, and does not work with slices
// (that have count 0 in their layout).
let from_offset = match base.layout.fields {
- layout::FieldPlacement::Array { stride, .. } => stride * from,
+ // `Size` multiplication is checked
+ layout::FieldPlacement::Array { stride, .. } => Size::mul(stride, from),
_ => bug!("Unexpected layout of index access: {:#?}", base.layout),
};
// Compute meta and new layout
- let inner_len = actual_to - from;
+ let inner_len = actual_to.checked_sub(from).unwrap();
let (meta, ty) = match base.layout.ty.kind {
// It is not nice to match on the type, but that seems to be the only way to
// implement this.
) -> InterpResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> {
use rustc::mir::ProjectionElem::*;
Ok(match *proj_elem {
- Field(field, _) => self.mplace_field(base, field.index() as u64)?,
+ Field(field, _) => self.mplace_field(base, u64::try_from(field.index()).unwrap())?,
Downcast(_, variant) => self.mplace_downcast(base, variant)?,
Deref => self.deref_operand(base.into())?,
ConstantIndex { offset, min_length, from_end } => {
let n = base.len(self)?;
- if n < min_length as u64 {
+ if n < u64::from(min_length) {
// This can only be reached in ConstProp and non-rustc-MIR.
- throw_ub!(BoundsCheckFailed { len: min_length as u64, index: n as u64 });
+ throw_ub!(BoundsCheckFailed { len: min_length.into(), index: n });
}
let index = if from_end {
- assert!(0 < offset && offset - 1 < min_length);
- n - u64::from(offset)
+ assert!(0 < offset && offset <= min_length);
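+ // When indexing from the end, `offset` is 1-based: `offset == 1` selects the
+ // last element.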
+ n.checked_sub(u64::from(offset)).unwrap()
} else {
assert!(offset < min_length);
u64::from(offset)
) -> InterpResult<'tcx, PlaceTy<'tcx, M::PointerTag>> {
use rustc::mir::ProjectionElem::*;
Ok(match *proj_elem {
- Field(field, _) => self.place_field(base, field.index() as u64)?,
+ Field(field, _) => self.place_field(base, u64::try_from(field.index()).unwrap())?,
Downcast(_, variant) => self.place_downcast(base, variant)?,
Deref => self.deref_operand(self.place_to_op(base)?)?.into(),
// For the other variants, we have to force an allocation.
kind: MemoryKind<M::MemoryKind>,
) -> MPlaceTy<'tcx, M::PointerTag> {
let ptr = self.memory.allocate_bytes(str.as_bytes(), kind);
- let meta = Scalar::from_uint(str.len() as u128, self.pointer_size());
+ let meta = Scalar::from_uint(u128::try_from(str.len()).unwrap(), self.pointer_size());
let mplace = MemPlace {
ptr: ptr.into(),
align: Align::from_bytes(1).unwrap(),
let size = discr_layout.value.size(self);
let discr_val = truncate(discr_val, size);
- let discr_dest = self.place_field(dest, discr_index as u64)?;
+ let discr_dest = self.place_field(dest, u64::try_from(discr_index).unwrap())?;
self.write_scalar(Scalar::from_uint(discr_val, size), discr_dest)?;
}
layout::Variants::Multiple {
niche_start_val,
)?;
// Write result.
- let niche_dest = self.place_field(dest, discr_index as u64)?;
+ let niche_dest = self.place_field(dest, u64::try_from(discr_index).unwrap())?;
self.write_immediate(*discr_val, niche_dest)?;
}
}
//!
//! The main entry point is the `step` method.
+use std::convert::TryFrom;
+
use rustc::mir;
use rustc::mir::interpret::{InterpResult, PointerArithmetic, Scalar};
use rustc::ty::layout::LayoutOf;
// Ignore zero-sized fields.
if !op.layout.is_zst() {
let field_index = active_field_index.unwrap_or(i);
- let field_dest = self.place_field(dest, field_index as u64)?;
+ let field_dest =
+ self.place_field(dest, u64::try_from(field_index).unwrap())?;
self.copy_op(op, field_dest)?;
}
}
use std::borrow::Cow;
+use std::convert::TryFrom;
use rustc::ty::layout::{self, LayoutOf, TyLayout};
use rustc::ty::Instance;
trace!("SwitchInt({:?})", *discr);
// Branch to the `otherwise` case by default, if no match is found.
+ assert!(!targets.is_empty());
let mut target_block = targets[targets.len() - 1];
for (index, &const_int) in values.iter().enumerate() {
};
// Find and consult vtable
let vtable = receiver_place.vtable();
- let drop_fn = self.get_vtable_slot(vtable, idx)?;
+ let drop_fn = self.get_vtable_slot(vtable, u64::try_from(idx).unwrap())?;
// `*mut receiver_place.layout.ty` is almost the layout that we
// want for args[0]: We have to project to field 0 because we want
-use super::{FnVal, InterpCx, Machine, MemoryKind};
+use std::ops::Mul;
+
use rustc::mir::interpret::{InterpResult, Pointer, PointerArithmetic, Scalar};
use rustc::ty::layout::{Align, HasDataLayout, LayoutOf, Size};
use rustc::ty::{self, Instance, Ty, TypeFoldable};
+use super::{FnVal, InterpCx, Machine, MemoryKind};
+
impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
/// Creates a dynamic vtable for the given type and vtable origin. This is used only for
/// objects.
pub fn get_vtable_slot(
&self,
vtable: Scalar<M::PointerTag>,
- idx: usize,
+ idx: u64,
) -> InterpResult<'tcx, FnVal<'tcx, M::ExtraFnVal>> {
let ptr_size = self.pointer_size();
// Skip over the 'drop_ptr', 'size', and 'align' fields.
- let vtable_slot = vtable.ptr_offset(ptr_size * (idx as u64 + 3), self)?;
+ let vtable_slot =
+ vtable.ptr_offset(Size::mul(ptr_size, idx.checked_add(3).unwrap()), self)?;
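+ // That is, method `idx` lives at byte offset `(idx + 3) * ptr_size` in the vtable.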
let vtable_slot = self
.memory
.check_ptr_access(vtable_slot, ptr_size, self.tcx.data_layout.pointer_align.abi)?
//! That's useful because it means other passes (e.g. promotion) can rely on `const`s
//! to be const-safe.
+use std::convert::TryFrom;
use std::fmt::Write;
-use std::ops::RangeInclusive;
+use std::ops::{Mul, RangeInclusive};
use rustc::ty;
-use rustc::ty::layout::{self, LayoutOf, TyLayout, VariantIdx};
+use rustc::ty::layout::{self, LayoutOf, Size, TyLayout, VariantIdx};
use rustc_data_structures::fx::FxHashSet;
use rustc_hir as hir;
use rustc_span::symbol::{sym, Symbol};
// This is the element type size.
let layout = self.ecx.layout_of(tys)?;
// This is the size in bytes of the whole array.
- let size = layout.size * len;
+ let size = Size::mul(layout.size, len);
// Size is not 0, get a pointer.
let ptr = self.ecx.force_ptr(mplace.ptr)?;
// Some byte was undefined, determine which
// element that byte belongs to so we can
// provide an index.
- let i = (ptr.offset.bytes() / layout.size.bytes()) as usize;
+ let i = usize::try_from(ptr.offset.bytes() / layout.size.bytes())
+ .unwrap();
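+ // Integer division maps the undef byte's offset to the index of the array
+ // element that contains it.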
self.path.push(PathElem::ArrayElem(i));
throw_validation_failure!("undefined bytes", self.path)
//! Visitor for a run-time value with a given layout: Traverse enums, structs and other compound
//! types until we arrive at the leaves, with custom handling for primitive types.
+use std::convert::TryFrom;
+
use rustc::mir::interpret::InterpResult;
use rustc::ty;
use rustc::ty::layout::{self, TyLayout, VariantIdx};
// errors: Projecting to a field needs access to `ecx`.
let fields: Vec<InterpResult<'tcx, Self::V>> =
(0..offsets.len()).map(|i| {
- v.project_field(self.ecx(), i as u64)
+ v.project_field(self.ecx(), u64::try_from(i).unwrap())
})
.collect();
self.visit_aggregate(v, fields.into_iter())?;