use mir;
use ty::{FnSig, Ty, layout};
+use ty::layout::{Size, Align};
use super::{
MemoryPointer, Lock, AccessKind
PointerOutOfBounds {
ptr: MemoryPointer,
access: bool,
- allocation_size: u64,
+ allocation_size: Size,
},
InvalidNullPointerUsage,
ReadPointerAsBytes,
TlsOutOfBounds,
AbiViolation(String),
AlignmentCheckFailed {
- required: u64,
- has: u64,
+ required: Align,
+ has: Align,
},
MemoryLockViolation {
ptr: MemoryPointer,
DeallocatedWrongMemoryKind(String, String),
ReallocateNonBasePtr,
DeallocateNonBasePtr,
- IncorrectAllocationInformation(u64, usize, u64, u64),
+ IncorrectAllocationInformation(Size, Size, Align, Align),
Layout(layout::LayoutError<'tcx>),
HeapAllocZeroBytes,
HeapAllocNonPowerOfTwoAlignment(u64),
PointerOutOfBounds { ptr, access, allocation_size } => {
write!(f, "{} at offset {}, outside bounds of allocation {} which has size {}",
if access { "memory access" } else { "pointer computed" },
- ptr.offset, ptr.alloc_id, allocation_size)
+ ptr.offset.bytes(), ptr.alloc_id, allocation_size.bytes())
},
MemoryLockViolation { ptr, len, frame, access, ref lock } => {
write!(f, "{:?} access by frame {} at {:?}, size {}, is in conflict with lock {:?}",
write!(f, "tried to interpret an invalid 32-bit value as a char: {}", c),
AlignmentCheckFailed { required, has } =>
write!(f, "tried to access memory with alignment {}, but alignment {} is required",
- has, required),
+ has.abi(), required.abi()),
TypeNotPrimitive(ty) =>
write!(f, "expected primitive type, got {}", ty),
Layout(ref err) =>
MachineError(ref inner) =>
write!(f, "{}", inner),
IncorrectAllocationInformation(size, size2, align, align2) =>
- write!(f, "incorrect alloc info: expected size {} and align {}, got size {} and align {}", size, align, size2, align2),
+ write!(f, "incorrect alloc info: expected size {} and align {}, got size {} and align {}", size.bytes(), align.abi(), size2.bytes(), align2.abi()),
_ => write!(f, "{}", self.description()),
}
}
use mir;
use hir::def_id::DefId;
use ty::{self, TyCtxt};
-use ty::layout::{self, Align, HasDataLayout};
+use ty::layout::{self, Align, HasDataLayout, Size};
use middle::region;
use std::iter;
use std::io;
#[derive(Copy, Clone, Debug, Eq, PartialEq, RustcEncodable, RustcDecodable, Hash)]
pub struct MemoryPointer {
pub alloc_id: AllocId,
- pub offset: u64,
+ pub offset: Size,
}
impl<'tcx> MemoryPointer {
- pub fn new(alloc_id: AllocId, offset: u64) -> Self {
+ pub fn new(alloc_id: AllocId, offset: Size) -> Self {
MemoryPointer { alloc_id, offset }
}
pub(crate) fn wrapping_signed_offset<C: HasDataLayout>(self, i: i64, cx: C) -> Self {
MemoryPointer::new(
self.alloc_id,
- cx.data_layout().wrapping_signed_offset(self.offset, i),
+ Size::from_bytes(cx.data_layout().wrapping_signed_offset(self.offset.bytes(), i)),
)
}
pub fn overflowing_signed_offset<C: HasDataLayout>(self, i: i128, cx: C) -> (Self, bool) {
- let (res, over) = cx.data_layout().overflowing_signed_offset(self.offset, i);
- (MemoryPointer::new(self.alloc_id, res), over)
+ let (res, over) = cx.data_layout().overflowing_signed_offset(self.offset.bytes(), i);
+ (MemoryPointer::new(self.alloc_id, Size::from_bytes(res)), over)
}
pub(crate) fn signed_offset<C: HasDataLayout>(self, i: i64, cx: C) -> EvalResult<'tcx, Self> {
Ok(MemoryPointer::new(
self.alloc_id,
- cx.data_layout().signed_offset(self.offset, i)?,
+ Size::from_bytes(cx.data_layout().signed_offset(self.offset.bytes(), i)?),
))
}
- pub fn overflowing_offset<C: HasDataLayout>(self, i: u64, cx: C) -> (Self, bool) {
- let (res, over) = cx.data_layout().overflowing_offset(self.offset, i);
- (MemoryPointer::new(self.alloc_id, res), over)
+ pub fn overflowing_offset<C: HasDataLayout>(self, i: Size, cx: C) -> (Self, bool) {
+ let (res, over) = cx.data_layout().overflowing_offset(self.offset.bytes(), i.bytes());
+ (MemoryPointer::new(self.alloc_id, Size::from_bytes(res)), over)
}
- pub fn offset<C: HasDataLayout>(self, i: u64, cx: C) -> EvalResult<'tcx, Self> {
+ pub fn offset<C: HasDataLayout>(self, i: Size, cx: C) -> EvalResult<'tcx, Self> {
Ok(MemoryPointer::new(
self.alloc_id,
- cx.data_layout().offset(self.offset, i)?,
+ Size::from_bytes(cx.data_layout().offset(self.offset.bytes(), i.bytes())?),
))
}
}
pub bytes: Vec<u8>,
/// Maps from byte addresses to allocations.
/// Only the first byte of a pointer is inserted into the map.
- pub relocations: BTreeMap<u64, AllocId>,
+ pub relocations: BTreeMap<Size, AllocId>,
/// Denotes undefined memory. Reading from undefined memory is forbidden in miri
pub undef_mask: UndefMask,
/// The alignment of the allocation to detect unaligned reads.
impl Allocation {
pub fn from_bytes(slice: &[u8], align: Align) -> Self {
- let mut undef_mask = UndefMask::new(0);
- undef_mask.grow(slice.len() as u64, true);
+ let mut undef_mask = UndefMask::new(Size::from_bytes(0));
+ undef_mask.grow(Size::from_bytes(slice.len() as u64), true);
Self {
bytes: slice.to_owned(),
relocations: BTreeMap::new(),
Allocation::from_bytes(slice, Align::from_bytes(1, 1).unwrap())
}
- pub fn undef(size: u64, align: Align) -> Self {
- assert_eq!(size as usize as u64, size);
+ pub fn undef(size: Size, align: Align) -> Self {
+ assert_eq!(size.bytes() as usize as u64, size.bytes());
Allocation {
- bytes: vec![0; size as usize],
+ bytes: vec![0; size.bytes() as usize],
relocations: BTreeMap::new(),
undef_mask: UndefMask::new(size),
align,
#[derive(Clone, Debug, Eq, PartialEq, Hash, RustcEncodable, RustcDecodable)]
pub struct UndefMask {
blocks: Vec<Block>,
- len: u64,
+ len: Size,
}
impl_stable_hash_for!(struct mir::interpret::UndefMask{blocks, len});
impl UndefMask {
- pub fn new(size: u64) -> Self {
+ pub fn new(size: Size) -> Self {
let mut m = UndefMask {
blocks: vec![],
- len: 0,
+ len: Size::from_bytes(0),
};
m.grow(size, false);
m
}
/// Check whether the range `start..end` (end-exclusive) is entirely defined.
- pub fn is_range_defined(&self, start: u64, end: u64) -> bool {
+ pub fn is_range_defined(&self, start: Size, end: Size) -> bool {
if end > self.len {
return false;
}
- for i in start..end {
- if !self.get(i) {
+ for i in start.bytes()..end.bytes() {
+ if !self.get(Size::from_bytes(i)) {
return false;
}
}
true
}
- pub fn set_range(&mut self, start: u64, end: u64, new_state: bool) {
+ pub fn set_range(&mut self, start: Size, end: Size, new_state: bool) {
let len = self.len;
if end > len {
self.grow(end - len, new_state);
self.set_range_inbounds(start, end, new_state);
}
- pub fn set_range_inbounds(&mut self, start: u64, end: u64, new_state: bool) {
- for i in start..end {
- self.set(i, new_state);
+ pub fn set_range_inbounds(&mut self, start: Size, end: Size, new_state: bool) {
+ for i in start.bytes()..end.bytes() {
+ self.set(Size::from_bytes(i), new_state);
}
}
- pub fn get(&self, i: u64) -> bool {
+ pub fn get(&self, i: Size) -> bool {
let (block, bit) = bit_index(i);
(self.blocks[block] & 1 << bit) != 0
}
- pub fn set(&mut self, i: u64, new_state: bool) {
+ pub fn set(&mut self, i: Size, new_state: bool) {
let (block, bit) = bit_index(i);
if new_state {
self.blocks[block] |= 1 << bit;
}
}
- pub fn grow(&mut self, amount: u64, new_state: bool) {
- let unused_trailing_bits = self.blocks.len() as u64 * BLOCK_SIZE - self.len;
- if amount > unused_trailing_bits {
- let additional_blocks = amount / BLOCK_SIZE + 1;
+ pub fn grow(&mut self, amount: Size, new_state: bool) {
+ let unused_trailing_bits = self.blocks.len() as u64 * BLOCK_SIZE - self.len.bytes();
+ if amount.bytes() > unused_trailing_bits {
+ let additional_blocks = amount.bytes() / BLOCK_SIZE + 1;
assert_eq!(additional_blocks as usize as u64, additional_blocks);
self.blocks.extend(
iter::repeat(0).take(additional_blocks as usize),
}
}
-fn bit_index(bits: u64) -> (usize, usize) {
+fn bit_index(bits: Size) -> (usize, usize) {
+ let bits = bits.bytes();
let a = bits / BLOCK_SIZE;
let b = bits % BLOCK_SIZE;
assert_eq!(a as usize as u64, a);
#![allow(unknown_lints)]
-use ty::layout::{Align, HasDataLayout};
+use ty::layout::{Align, HasDataLayout, Size};
use ty;
use super::{EvalResult, MemoryPointer, PointerArithmetic, Allocation};
/// Used only for types with layout::abi::ScalarPair
ByValPair(PrimVal, PrimVal),
/// Used only for the remaining cases. An allocation + offset into the allocation
- ByRef(&'tcx Allocation, u64),
+ ByRef(&'tcx Allocation, Size),
}
impl<'tcx> ConstValue<'tcx> {
}
}
- pub fn offset<C: HasDataLayout>(self, i: u64, cx: C) -> EvalResult<'tcx, Self> {
+ pub fn offset<C: HasDataLayout>(self, i: Size, cx: C) -> EvalResult<'tcx, Self> {
let layout = cx.data_layout();
match self.primval {
PrimVal::Bytes(b) => {
assert_eq!(b as u64 as u128, b);
Ok(Pointer::from(
- PrimVal::Bytes(layout.offset(b as u64, i)? as u128),
+ PrimVal::Bytes(layout.offset(b as u64, i.bytes())? as u128),
))
}
PrimVal::Ptr(ptr) => ptr.offset(i, layout).map(Pointer::from),
}
}
- pub fn from_uint_size(size: u64) -> Self {
- match size {
+ pub fn from_uint_size(size: Size) -> Self {
+ match size.bytes() {
1 => PrimValKind::U8,
2 => PrimValKind::U16,
4 => PrimValKind::U32,
8 => PrimValKind::U64,
16 => PrimValKind::U128,
- _ => bug!("can't make uint with size {}", size),
+ _ => bug!("can't make uint with size {}", size.bytes()),
}
}
- pub fn from_int_size(size: u64) -> Self {
- match size {
+ pub fn from_int_size(size: Size) -> Self {
+ match size.bytes() {
1 => PrimValKind::I8,
2 => PrimValKind::I16,
4 => PrimValKind::I32,
8 => PrimValKind::I64,
16 => PrimValKind::I128,
- _ => bug!("can't make int with size {}", size),
+ _ => bug!("can't make int with size {}", size.bytes()),
}
}
.get_alloc(ptr.alloc_id);
if let Some(alloc) = alloc {
assert_eq!(len as usize as u128, len);
- let slice = &alloc.bytes[(ptr.offset as usize)..][..(len as usize)];
+ let slice = &alloc.bytes[(ptr.offset.bytes() as usize)..][..(len as usize)];
let s = ::std::str::from_utf8(slice)
.expect("non utf8 str from miri");
write!(f, "{:?}", s)
use rustc_data_structures::indexed_vec::Idx;
use rustc::mir::interpret::{GlobalId, MemoryPointer, PrimVal, Allocation, ConstValue};
use rustc::ty::{self, Ty};
-use rustc::ty::layout::{self, HasDataLayout, LayoutOf, Scalar};
+use rustc::ty::layout::{self, HasDataLayout, LayoutOf, Scalar, Size};
use builder::Builder;
use common::{CodegenCx};
use common::{C_bytes, C_struct, C_uint_big, C_undef, C_usize};
let llval = unsafe { llvm::LLVMConstInBoundsGEP(
consts::bitcast(base_addr, Type::i8p(cx)),
- &C_usize(cx, ptr.offset),
+ &C_usize(cx, ptr.offset.bytes()),
1,
) };
if scalar.value != layout::Pointer {
let mut next_offset = 0;
for (&offset, &alloc_id) in &alloc.relocations {
+ let offset = offset.bytes();
assert_eq!(offset as usize as u64, offset);
let offset = offset as usize;
if offset > next_offset {
).expect("const_alloc_to_llvm: could not read relocation pointer") as u64;
llvals.push(primval_to_llvm(
cx,
- PrimVal::Ptr(MemoryPointer { alloc_id, offset: ptr_offset }),
+ PrimVal::Ptr(MemoryPointer { alloc_id, offset: Size::from_bytes(ptr_offset) }),
&Scalar {
value: layout::Primitive::Pointer,
valid_range: 0..=!0
let static_ = cx.tcx.const_eval(param_env.and(cid))?;
let alloc = match static_.val {
- ConstVal::Value(ConstValue::ByRef(alloc, 0)) => alloc,
+ ConstVal::Value(ConstValue::ByRef(alloc, n)) if n.bytes() == 0 => alloc,
_ => bug!("static const eval returned {:#?}", static_),
};
Ok(const_alloc_to_llvm(cx, alloc))
let llval = unsafe { LLVMConstInBoundsGEP(
consts::bitcast(base_addr, Type::i8p(bx.cx)),
- &C_usize(bx.cx, offset),
+ &C_usize(bx.cx, offset.bytes()),
1,
)};
let llval = consts::bitcast(llval, layout.llvm_type(bx.cx).ptr_to());
use rustc::hir::map::blocks::FnLikeNode;
use rustc::middle::region;
use rustc::infer::InferCtxt;
-use rustc::ty::layout::IntegerExt;
+use rustc::ty::layout::{IntegerExt, Size};
use rustc::ty::subst::Subst;
use rustc::ty::{self, Ty, TyCtxt, layout};
use rustc::ty::subst::Substs;
LitKind::Str(ref s, _) => {
let s = s.as_str();
let id = self.tcx.allocate_cached(s.as_bytes());
- let ptr = MemoryPointer::new(id, 0);
+ let ptr = MemoryPointer::new(id, Size::from_bytes(0));
ConstValue::ByValPair(
PrimVal::Ptr(ptr),
PrimVal::from_u128(s.len() as u128),
},
LitKind::ByteStr(ref data) => {
let id = self.tcx.allocate_cached(data);
- let ptr = MemoryPointer::new(id, 0);
+ let ptr = MemoryPointer::new(id, Size::from_bytes(0));
ConstValue::ByVal(PrimVal::Ptr(ptr))
},
LitKind::Byte(n) => ConstValue::ByVal(PrimVal::Bytes(n as u128)),
.interpret_interner
.get_alloc(ptr.alloc_id)
.unwrap();
- assert_eq!(ptr.offset, 0);
+ assert_eq!(ptr.offset.bytes(), 0);
// FIXME: check length
alloc.bytes.iter().map(|b| {
&*pattern_arena.alloc(Pattern {
use rustc::mir::{fmt_const_val, Field, BorrowKind, Mutability};
use rustc::mir::interpret::{PrimVal, GlobalId, ConstValue};
use rustc::ty::{self, TyCtxt, AdtDef, Ty, Region};
+use rustc::ty::layout::Size;
use rustc::ty::subst::{Substs, Kind};
use rustc::hir::{self, PatKind, RangeEnd};
use rustc::hir::def::{Def, CtorKind};
LitKind::Str(ref s, _) => {
let s = s.as_str();
let id = tcx.allocate_cached(s.as_bytes());
- let ptr = MemoryPointer::new(id, 0);
+ let ptr = MemoryPointer::new(id, Size::from_bytes(0));
ConstValue::ByValPair(
PrimVal::Ptr(ptr),
PrimVal::from_u128(s.len() as u128),
},
LitKind::ByteStr(ref data) => {
let id = tcx.allocate_cached(data);
- let ptr = MemoryPointer::new(id, 0);
+ let ptr = MemoryPointer::new(id, Size::from_bytes(0));
ConstValue::ByVal(PrimVal::Ptr(ptr))
},
LitKind::Byte(n) => ConstValue::ByVal(PrimVal::Bytes(n as u128)),
match dest_ty.sty {
// float -> uint
TyUint(t) => {
- let width = t.bit_width().unwrap_or(self.memory.pointer_size() as usize * 8);
+ let width = t.bit_width().unwrap_or(self.memory.pointer_size().bytes() as usize * 8);
match fty {
FloatTy::F32 => Ok(PrimVal::Bytes(Single::from_bits(bits).to_u128(width).value)),
FloatTy::F64 => Ok(PrimVal::Bytes(Double::from_bits(bits).to_u128(width).value)),
},
// float -> int
TyInt(t) => {
- let width = t.bit_width().unwrap_or(self.memory.pointer_size() as usize * 8);
+ let width = t.bit_width().unwrap_or(self.memory.pointer_size().bytes() as usize * 8);
match fty {
FloatTy::F32 => Ok(PrimVal::from_i128(Single::from_bits(bits).to_i128(width).value)),
FloatTy::F64 => Ok(PrimVal::from_i128(Double::from_bits(bits).to_i128(width).value)),
let ptr = ptr.primval.to_ptr().unwrap();
let alloc = ecx.memory.get(ptr.alloc_id)?;
assert!(alloc.align.abi() >= align.abi());
- assert!(alloc.bytes.len() as u64 - ptr.offset >= layout.size.bytes());
+ assert!(alloc.bytes.len() as u64 - ptr.offset.bytes() >= layout.size.bytes());
let mut alloc = alloc.clone();
alloc.align = align;
let alloc = ecx.tcx.intern_const_alloc(alloc);
let layout = ecx.layout_of(mir.return_ty().subst(tcx, cid.instance.substs))?;
assert!(!layout.is_unsized());
let ptr = ecx.memory.allocate(
- layout.size.bytes(),
+ layout.size,
layout.align,
None,
)?;
let (ptr, align) = match value {
Value::ByValPair(..) | Value::ByVal(_) => {
let layout = ecx.layout_of(ty)?;
- let ptr = ecx.memory.allocate(layout.size.bytes(), layout.align, Some(MemoryKind::Stack))?;
+ let ptr = ecx.memory.allocate(layout.size, layout.align, Some(MemoryKind::Stack))?;
let ptr: Pointer = ptr.into();
ecx.write_value_to_ptr(value, ptr, layout.align, ty)?;
(ptr, layout.align)
) -> &'tcx Allocation {
match val {
ConstValue::ByRef(alloc, offset) => {
- assert_eq!(offset, 0);
+ assert_eq!(offset.bytes(), 0);
return alloc;
},
_ => ()
());
let value = ecx.const_value_to_value(val, ty)?;
let layout = ecx.layout_of(ty)?;
- let ptr = ecx.memory.allocate(layout.size.bytes(), layout.align, Some(MemoryKind::Stack))?;
+ let ptr = ecx.memory.allocate(layout.size, layout.align, Some(MemoryKind::Stack))?;
ecx.write_value_to_ptr(value, ptr.into(), layout.align, ty)?;
let alloc = ecx.memory.get(ptr.alloc_id)?;
Ok(tcx.intern_const_alloc(alloc.clone()))
let layout = self.layout_of(ty)?;
assert!(!layout.is_unsized(), "cannot alloc memory for unsized type");
- let size = layout.size.bytes();
- self.memory.allocate(size, layout.align, Some(MemoryKind::Stack))
+ self.memory.allocate(layout.size, layout.align, Some(MemoryKind::Stack))
}
pub fn memory(&self) -> &Memory<'a, 'mir, 'tcx, M> {
)
}
};
- let elem_size = self.layout_of(elem_ty)?.size.bytes();
+ let elem_size = self.layout_of(elem_ty)?.size;
let value = self.eval_operand(operand)?.value;
let (dest, dest_align) = self.force_allocation(dest)?.to_ptr_align();
// FIXME: speed up repeat filling
for i in 0..length {
- let elem_dest = dest.offset(i * elem_size, &self)?;
+ let elem_dest = dest.offset(elem_size * i as u64, &self)?;
self.write_value_to_ptr(value, elem_dest, dest_align, elem_ty)?;
}
}
.interpret_interner
.cache_static(gid.instance.def_id());
let layout = self.layout_of(ty)?;
- let ptr = MemoryPointer::new(alloc_id, 0);
+ let ptr = MemoryPointer::new(alloc_id, Size::from_bytes(0));
return Ok(Value::ByRef(ptr.into(), layout.align))
}
let cv = self.const_eval(gid)?;
} else {
let dest_ptr = self.alloc_ptr(dest_ty)?.into();
let layout = self.layout_of(dest_ty)?;
- self.memory.copy(src_ptr, align.min(layout.align), dest_ptr, layout.align, layout.size.bytes(), false)?;
+ self.memory.copy(src_ptr, align.min(layout.align), dest_ptr, layout.align, layout.size, false)?;
write_dest(self, Value::ByRef(dest_ptr, layout.align))?;
}
} else {
trace!("write_value_to_ptr: {:#?}, {}, {:#?}", value, dest_ty, layout);
match value {
Value::ByRef(ptr, align) => {
- self.memory.copy(ptr, align.min(layout.align), dest, dest_align.min(layout.align), layout.size.bytes(), false)
+ self.memory.copy(ptr, align.min(layout.align), dest, dest_align.min(layout.align), layout.size, false)
}
Value::ByVal(primval) => {
let signed = match layout.abi {
_ if primval.is_undef() => false,
_ => bug!("write_value_to_ptr: invalid ByVal layout: {:#?}", layout)
};
- self.memory.write_primval(dest, dest_align, primval, layout.size.bytes(), signed)
+ self.memory.write_primval(dest, dest_align, primval, layout.size, signed)
}
Value::ByValPair(a_val, b_val) => {
trace!("write_value_to_ptr valpair: {:#?}", layout);
let (a_size, b_size) = (a.size(&self), b.size(&self));
let a_ptr = dest;
let b_offset = a_size.abi_align(b.align(&self));
- let b_ptr = dest.offset(b_offset.bytes(), &self)?.into();
+ let b_ptr = dest.offset(b_offset, &self)?.into();
// TODO: What about signedess?
- self.memory.write_primval(a_ptr, dest_align, a_val, a_size.bytes(), false)?;
- self.memory.write_primval(b_ptr, dest_align, b_val, b_size.bytes(), false)
+ self.memory.write_primval(a_ptr, dest_align, a_val, a_size, false)?;
+ self.memory.write_primval(b_ptr, dest_align, b_val, b_size, false)
}
}
}
ty::TyInt(int_ty) => {
use syntax::ast::IntTy::*;
let size = match int_ty {
- I8 => 1,
- I16 => 2,
- I32 => 4,
- I64 => 8,
- I128 => 16,
+ I8 => Size::from_bytes(1),
+ I16 => Size::from_bytes(2),
+ I32 => Size::from_bytes(4),
+ I64 => Size::from_bytes(8),
+ I128 => Size::from_bytes(16),
Isize => self.memory.pointer_size(),
};
PrimValKind::from_int_size(size)
ty::TyUint(uint_ty) => {
use syntax::ast::UintTy::*;
let size = match uint_ty {
- U8 => 1,
- U16 => 2,
- U32 => 4,
- U64 => 8,
- U128 => 16,
+ U8 => Size::from_bytes(1),
+ U16 => Size::from_bytes(2),
+ U32 => Size::from_bytes(4),
+ U64 => Size::from_bytes(8),
+ U128 => Size::from_bytes(16),
Usize => self.memory.pointer_size(),
};
PrimValKind::from_uint_size(size)
layout::Abi::Scalar(ref scalar) => {
use rustc::ty::layout::Primitive::*;
match scalar.value {
- Int(i, false) => PrimValKind::from_uint_size(i.size().bytes()),
- Int(i, true) => PrimValKind::from_int_size(i.size().bytes()),
+ Int(i, false) => PrimValKind::from_uint_size(i.size()),
+ Int(i, true) => PrimValKind::from_int_size(i.size()),
F32 => PrimValKind::F32,
F64 => PrimValKind::F64,
Pointer => PrimValKind::Ptr,
) -> EvalResult<'tcx> {
match ty.sty {
ty::TyBool => {
- let val = self.memory.read_primval(ptr, ptr_align, 1)?;
+ let val = self.memory.read_primval(ptr, ptr_align, Size::from_bytes(1))?;
match val {
PrimVal::Bytes(0) | PrimVal::Bytes(1) => (),
// TODO: This seems a little overeager, should reading at bool type already be insta-UB?
}
}
ty::TyChar => {
- let c = self.memory.read_primval(ptr, ptr_align, 4)?.to_bytes()? as u32;
+ let c = self.memory.read_primval(ptr, ptr_align, Size::from_bytes(4))?.to_bytes()? as u32;
match ::std::char::from_u32(c) {
Some(..) => (),
None => return err!(InvalidChar(c as u128)),
}
if let layout::Abi::Scalar(ref scalar) = self.layout_of(ty)?.abi {
- let size = scalar.value.size(self).bytes();
+ let size = scalar.value.size(self);
self.memory.read_primval(ptr, ptr_align, size)?;
}
}
match layout.abi {
layout::Abi::Scalar(..) => {
- let primval = self.memory.read_primval(ptr, ptr_align, layout.size.bytes())?;
+ let primval = self.memory.read_primval(ptr, ptr_align, layout.size)?;
Ok(Some(Value::ByVal(primval)))
}
layout::Abi::ScalarPair(ref a, ref b) => {
let (a_size, b_size) = (a.size(self), b.size(self));
let a_ptr = ptr;
let b_offset = a_size.abi_align(b.align(self));
- let b_ptr = ptr.offset(b_offset.bytes(), self)?.into();
- let a_val = self.memory.read_primval(a_ptr, ptr_align, a_size.bytes())?;
- let b_val = self.memory.read_primval(b_ptr, ptr_align, b_size.bytes())?;
+ let b_ptr = ptr.offset(b_offset, self)?.into();
+ let a_val = self.memory.read_primval(a_ptr, ptr_align, a_size)?;
+ let b_val = self.memory.read_primval(b_ptr, ptr_align, b_size)?;
Ok(Some(Value::ByValPair(a_val, b_val)))
}
_ => Ok(None),
use rustc::mir;
use rustc::ty::{self, Ty};
+use rustc::ty::layout::Size;
use syntax::codemap::Span;
use syntax::ast::Mutability;
fn check_locks<'a>(
_mem: &Memory<'a, 'mir, 'tcx, Self>,
_ptr: MemoryPointer,
- _size: u64,
+ _size: Size,
_access: AccessKind,
) -> EvalResult<'tcx> {
Ok(())
use rustc::ty::Instance;
use rustc::ty::ParamEnv;
use rustc::ty::maps::TyCtxtAt;
-use rustc::ty::layout::{self, Align, TargetDataLayout};
+use rustc::ty::layout::{self, Align, TargetDataLayout, Size};
use syntax::ast::Mutability;
use rustc::middle::const_val::{ConstVal, ErrKind};
pub fn create_fn_alloc(&mut self, instance: Instance<'tcx>) -> MemoryPointer {
let id = self.tcx.interpret_interner.create_fn_alloc(instance);
- MemoryPointer::new(id, 0)
+ MemoryPointer::new(id, Size::from_bytes(0))
}
pub fn allocate_cached(&mut self, bytes: &[u8]) -> MemoryPointer {
let id = self.tcx.allocate_cached(bytes);
- MemoryPointer::new(id, 0)
+ MemoryPointer::new(id, Size::from_bytes(0))
}
/// kind is `None` for statics
/// kind is `None` for statics
pub fn allocate(
&mut self,
- size: u64,
+ size: Size,
align: Align,
kind: Option<MemoryKind<M::MemoryKinds>>,
) -> EvalResult<'tcx, MemoryPointer> {
let id = self.allocate_value(Allocation::undef(size, align), kind)?;
- Ok(MemoryPointer::new(id, 0))
+ Ok(MemoryPointer::new(id, Size::from_bytes(0)))
}
pub fn reallocate(
&mut self,
ptr: MemoryPointer,
- old_size: u64,
+ old_size: Size,
old_align: Align,
- new_size: u64,
+ new_size: Size,
new_align: Align,
kind: MemoryKind<M::MemoryKinds>,
) -> EvalResult<'tcx, MemoryPointer> {
- if ptr.offset != 0 {
+ if ptr.offset.bytes() != 0 {
return err!(ReallocateNonBasePtr);
}
if self.alloc_map.contains_key(&ptr.alloc_id) {
pub fn deallocate(
&mut self,
ptr: MemoryPointer,
- size_and_align: Option<(u64, Align)>,
+ size_and_align: Option<(Size, Align)>,
kind: MemoryKind<M::MemoryKinds>,
) -> EvalResult<'tcx> {
- if ptr.offset != 0 {
+ if ptr.offset.bytes() != 0 {
return err!(DeallocateNonBasePtr);
}
));
}
if let Some((size, align)) = size_and_align {
- if size != alloc.bytes.len() as u64 || align != alloc.align {
- return err!(IncorrectAllocationInformation(size, alloc.bytes.len(), align.abi(), alloc.align.abi()));
+ if size.bytes() != alloc.bytes.len() as u64 || align != alloc.align {
+ return err!(IncorrectAllocationInformation(size, Size::from_bytes(alloc.bytes.len() as u64), align, alloc.align));
}
}
Ok(())
}
- pub fn pointer_size(&self) -> u64 {
- self.tcx.data_layout.pointer_size.bytes()
+ pub fn pointer_size(&self) -> Size {
+ self.tcx.data_layout.pointer_size
}
pub fn endianness(&self) -> layout::Endian {
let (offset, alloc_align) = match ptr.into_inner_primval() {
PrimVal::Ptr(ptr) => {
let alloc = self.get(ptr.alloc_id)?;
- (ptr.offset, alloc.align)
+ (ptr.offset.bytes(), alloc.align)
}
PrimVal::Bytes(bytes) => {
- let v = ((bytes as u128) % (1 << self.pointer_size())) as u64;
+ let v = ((bytes as u128) % (1 << self.pointer_size().bytes())) as u64;
if v == 0 {
return err!(InvalidNullPointerUsage);
}
// Check alignment
if alloc_align.abi() < required_align.abi() {
return err!(AlignmentCheckFailed {
- has: alloc_align.abi(),
- required: required_align.abi(),
+ has: alloc_align,
+ required: required_align,
});
}
if offset % required_align.abi() == 0 {
Ok(())
} else {
+                    // Report the alignment `offset` actually has: the largest
+                    // power of two dividing it. Using the raw remainder here
+                    // would make `Align::from_bytes` fail (and the `unwrap`
+                    // panic) whenever the remainder is not itself a power of
+                    // two, e.g. offset 3 against a required alignment of 8.
+                    // `offset` is nonzero on this path (its remainder was
+                    // nonzero), so `trailing_zeros` is well-defined and the
+                    // shift cannot overflow.
+                    let has = 1 << offset.trailing_zeros();
err!(AlignmentCheckFailed {
-                        has: offset % required_align.abi(),
-                        required: required_align.abi(),
+                        has: Align::from_bytes(has, has).unwrap(),
+                        required: required_align,
})
}
}
pub fn check_bounds(&self, ptr: MemoryPointer, access: bool) -> EvalResult<'tcx> {
let alloc = self.get(ptr.alloc_id)?;
let allocation_size = alloc.bytes.len() as u64;
- if ptr.offset > allocation_size {
+ if ptr.offset.bytes() > allocation_size {
return err!(PointerOutOfBounds {
ptr,
access,
- allocation_size,
+ allocation_size: Size::from_bytes(allocation_size),
});
}
Ok(())
}
pub fn get_fn(&self, ptr: MemoryPointer) -> EvalResult<'tcx, Instance<'tcx>> {
- if ptr.offset != 0 {
+ if ptr.offset.bytes() != 0 {
return err!(InvalidFunctionPointer);
}
debug!("reading fn ptr: {}", ptr.alloc_id);
};
for i in 0..(alloc.bytes.len() as u64) {
+ let i = Size::from_bytes(i);
if let Some(&target_id) = alloc.relocations.get(&i) {
if allocs_seen.insert(target_id) {
allocs_to_print.push_back(target_id);
}
relocations.push((i, target_id));
}
- if alloc.undef_mask.is_range_defined(i, i + 1) {
+ if alloc.undef_mask.is_range_defined(i, i + Size::from_bytes(1)) {
// this `as usize` is fine, since `i` came from a `usize`
- write!(msg, "{:02x} ", alloc.bytes[i as usize]).unwrap();
+ write!(msg, "{:02x} ", alloc.bytes[i.bytes() as usize]).unwrap();
} else {
msg.push_str("__ ");
}
if !relocations.is_empty() {
msg.clear();
write!(msg, "{:1$}", "", prefix_len).unwrap(); // Print spaces.
- let mut pos = 0;
- let relocation_width = (self.pointer_size() - 1) * 3;
+ let mut pos = Size::from_bytes(0);
+ let relocation_width = (self.pointer_size().bytes() - 1) * 3;
for (i, target_id) in relocations {
// this `as usize` is fine, since we can't print more chars than `usize::MAX`
- write!(msg, "{:1$}", "", ((i - pos) * 3) as usize).unwrap();
+ write!(msg, "{:1$}", "", ((i - pos) * 3).bytes() as usize).unwrap();
let target = format!("({})", target_id);
// this `as usize` is fine, since we can't print more chars than `usize::MAX`
write!(msg, "└{0:─^1$}┘ ", target, relocation_width as usize).unwrap();
fn get_bytes_unchecked(
&self,
ptr: MemoryPointer,
- size: u64,
+ size: Size,
align: Align,
) -> EvalResult<'tcx, &[u8]> {
// Zero-sized accesses can use dangling pointers, but they still have to be aligned and non-NULL
self.check_align(ptr.into(), align)?;
- if size == 0 {
+ if size.bytes() == 0 {
return Ok(&[]);
}
M::check_locks(self, ptr, size, AccessKind::Read)?;
self.check_bounds(ptr.offset(size, self)?, true)?; // if ptr.offset is in bounds, then so is ptr (because offset checks for overflow)
let alloc = self.get(ptr.alloc_id)?;
- assert_eq!(ptr.offset as usize as u64, ptr.offset);
- assert_eq!(size as usize as u64, size);
- let offset = ptr.offset as usize;
- Ok(&alloc.bytes[offset..offset + size as usize])
+ assert_eq!(ptr.offset.bytes() as usize as u64, ptr.offset.bytes());
+ assert_eq!(size.bytes() as usize as u64, size.bytes());
+ let offset = ptr.offset.bytes() as usize;
+ Ok(&alloc.bytes[offset..offset + size.bytes() as usize])
}
fn get_bytes_unchecked_mut(
&mut self,
ptr: MemoryPointer,
- size: u64,
+ size: Size,
align: Align,
) -> EvalResult<'tcx, &mut [u8]> {
// Zero-sized accesses can use dangling pointers, but they still have to be aligned and non-NULL
self.check_align(ptr.into(), align)?;
- if size == 0 {
+ if size.bytes() == 0 {
return Ok(&mut []);
}
M::check_locks(self, ptr, size, AccessKind::Write)?;
self.check_bounds(ptr.offset(size, &*self)?, true)?; // if ptr.offset is in bounds, then so is ptr (because offset checks for overflow)
let alloc = self.get_mut(ptr.alloc_id)?;
- assert_eq!(ptr.offset as usize as u64, ptr.offset);
- assert_eq!(size as usize as u64, size);
- let offset = ptr.offset as usize;
- Ok(&mut alloc.bytes[offset..offset + size as usize])
+ assert_eq!(ptr.offset.bytes() as usize as u64, ptr.offset.bytes());
+ assert_eq!(size.bytes() as usize as u64, size.bytes());
+ let offset = ptr.offset.bytes() as usize;
+ Ok(&mut alloc.bytes[offset..offset + size.bytes() as usize])
}
- fn get_bytes(&self, ptr: MemoryPointer, size: u64, align: Align) -> EvalResult<'tcx, &[u8]> {
- assert_ne!(size, 0);
+ fn get_bytes(&self, ptr: MemoryPointer, size: Size, align: Align) -> EvalResult<'tcx, &[u8]> {
+ assert_ne!(size.bytes(), 0);
if self.relocations(ptr, size)?.count() != 0 {
return err!(ReadPointerAsBytes);
}
fn get_bytes_mut(
&mut self,
ptr: MemoryPointer,
- size: u64,
+ size: Size,
align: Align,
) -> EvalResult<'tcx, &mut [u8]> {
- assert_ne!(size, 0);
+ assert_ne!(size.bytes(), 0);
self.clear_relocations(ptr, size)?;
self.mark_definedness(ptr.into(), size, true)?;
self.get_bytes_unchecked_mut(ptr, size, align)
src_align: Align,
dest: Pointer,
dest_align: Align,
- size: u64,
+ size: Size,
nonoverlapping: bool,
) -> EvalResult<'tcx> {
// Empty accesses don't need to be valid pointers, but they should still be aligned
self.check_align(src, src_align)?;
self.check_align(dest, dest_align)?;
- if size == 0 {
+ if size.bytes() == 0 {
return Ok(());
}
let src = src.to_ptr()?;
// behind `src` and `dest`. Also, we use the overlapping-safe `ptr::copy` if `src` and
// `dest` could possibly overlap.
unsafe {
- assert_eq!(size as usize as u64, size);
+ assert_eq!(size.bytes() as usize as u64, size.bytes());
if src.alloc_id == dest.alloc_id {
if nonoverlapping {
if (src.offset <= dest.offset && src.offset + size > dest.offset) ||
));
}
}
- ptr::copy(src_bytes, dest_bytes, size as usize);
+ ptr::copy(src_bytes, dest_bytes, size.bytes() as usize);
} else {
- ptr::copy_nonoverlapping(src_bytes, dest_bytes, size as usize);
+ ptr::copy_nonoverlapping(src_bytes, dest_bytes, size.bytes() as usize);
}
}
pub fn read_c_str(&self, ptr: MemoryPointer) -> EvalResult<'tcx, &[u8]> {
let alloc = self.get(ptr.alloc_id)?;
- assert_eq!(ptr.offset as usize as u64, ptr.offset);
- let offset = ptr.offset as usize;
+ assert_eq!(ptr.offset.bytes() as usize as u64, ptr.offset.bytes());
+ let offset = ptr.offset.bytes() as usize;
match alloc.bytes[offset..].iter().position(|&c| c == 0) {
Some(size) => {
- if self.relocations(ptr, (size + 1) as u64)?.count() != 0 {
+ let p1 = Size::from_bytes((size + 1) as u64);
+ if self.relocations(ptr, p1)?.count() != 0 {
return err!(ReadPointerAsBytes);
}
- self.check_defined(ptr, (size + 1) as u64)?;
- M::check_locks(self, ptr, (size + 1) as u64, AccessKind::Read)?;
+ self.check_defined(ptr, p1)?;
+ M::check_locks(self, ptr, p1, AccessKind::Read)?;
Ok(&alloc.bytes[offset..offset + size])
}
None => err!(UnterminatedCString(ptr)),
}
}
- pub fn read_bytes(&self, ptr: Pointer, size: u64) -> EvalResult<'tcx, &[u8]> {
+ pub fn read_bytes(&self, ptr: Pointer, size: Size) -> EvalResult<'tcx, &[u8]> {
// Empty accesses don't need to be valid pointers, but they should still be non-NULL
let align = Align::from_bytes(1, 1).unwrap();
self.check_align(ptr, align)?;
- if size == 0 {
+ if size.bytes() == 0 {
return Ok(&[]);
}
self.get_bytes(ptr.to_ptr()?, size, align)
if src.is_empty() {
return Ok(());
}
- let bytes = self.get_bytes_mut(ptr.to_ptr()?, src.len() as u64, align)?;
+ let bytes = self.get_bytes_mut(ptr.to_ptr()?, Size::from_bytes(src.len() as u64), align)?;
bytes.clone_from_slice(src);
Ok(())
}
- pub fn write_repeat(&mut self, ptr: Pointer, val: u8, count: u64) -> EvalResult<'tcx> {
+ pub fn write_repeat(&mut self, ptr: Pointer, val: u8, count: Size) -> EvalResult<'tcx> {
// Empty accesses don't need to be valid pointers, but they should still be non-NULL
let align = Align::from_bytes(1, 1).unwrap();
self.check_align(ptr, align)?;
- if count == 0 {
+ if count.bytes() == 0 {
return Ok(());
}
let bytes = self.get_bytes_mut(ptr.to_ptr()?, count, align)?;
Ok(())
}
- pub fn read_primval(&self, ptr: MemoryPointer, ptr_align: Align, size: u64) -> EvalResult<'tcx, PrimVal> {
+ pub fn read_primval(&self, ptr: MemoryPointer, ptr_align: Align, size: Size) -> EvalResult<'tcx, PrimVal> {
self.check_relocation_edges(ptr, size)?; // Make sure we don't read part of a pointer as a pointer
let endianness = self.endianness();
let bytes = self.get_bytes_unchecked(ptr, size, ptr_align.min(self.int_align(size)))?;
} else {
let alloc = self.get(ptr.alloc_id)?;
match alloc.relocations.get(&ptr.offset) {
- Some(&alloc_id) => return Ok(PrimVal::Ptr(MemoryPointer::new(alloc_id, bytes as u64))),
+ Some(&alloc_id) => return Ok(PrimVal::Ptr(MemoryPointer::new(alloc_id, Size::from_bytes(bytes as u64)))),
None => {},
}
}
self.read_primval(ptr, ptr_align, self.pointer_size())
}
- pub fn write_primval(&mut self, ptr: Pointer, ptr_align: Align, val: PrimVal, size: u64, signed: bool) -> EvalResult<'tcx> {
+ pub fn write_primval(&mut self, ptr: Pointer, ptr_align: Align, val: PrimVal, size: Size, signed: bool) -> EvalResult<'tcx> {
let endianness = self.endianness();
let bytes = match val {
PrimVal::Ptr(val) => {
assert_eq!(size, self.pointer_size());
- val.offset as u128
+ val.offset.bytes() as u128
}
PrimVal::Bytes(bytes) => bytes,
self.write_primval(ptr.into(), ptr_align, val, ptr_size, false)
}
- fn int_align(&self, size: u64) -> Align {
+ fn int_align(&self, size: Size) -> Align {
// We assume pointer-sized integers have the same alignment as pointers.
// We also assume signed and unsigned integers of the same size have the same alignment.
- let ity = match size {
+ let ity = match size.bytes() {
1 => layout::I8,
2 => layout::I16,
4 => layout::I32,
8 => layout::I64,
16 => layout::I128,
- _ => bug!("bad integer size: {}", size),
+ _ => bug!("bad integer size: {}", size.bytes()),
};
ity.align(self)
}
fn relocations(
&self,
ptr: MemoryPointer,
- size: u64,
- ) -> EvalResult<'tcx, btree_map::Range<u64, AllocId>> {
- let start = ptr.offset.saturating_sub(self.pointer_size() - 1);
+ size: Size,
+ ) -> EvalResult<'tcx, btree_map::Range<Size, AllocId>> {
+ let start = ptr.offset.bytes().saturating_sub(self.pointer_size().bytes() - 1);
let end = ptr.offset + size;
- Ok(self.get(ptr.alloc_id)?.relocations.range(start..end))
+ Ok(self.get(ptr.alloc_id)?.relocations.range(Size::from_bytes(start)..end))
}
- fn clear_relocations(&mut self, ptr: MemoryPointer, size: u64) -> EvalResult<'tcx> {
+ fn clear_relocations(&mut self, ptr: MemoryPointer, size: Size) -> EvalResult<'tcx> {
// Find all relocations overlapping the given range.
let keys: Vec<_> = self.relocations(ptr, size)?.map(|(&k, _)| k).collect();
if keys.is_empty() {
Ok(())
}
- fn check_relocation_edges(&self, ptr: MemoryPointer, size: u64) -> EvalResult<'tcx> {
- let overlapping_start = self.relocations(ptr, 0)?.count();
- let overlapping_end = self.relocations(ptr.offset(size, self)?, 0)?.count();
+ fn check_relocation_edges(&self, ptr: MemoryPointer, size: Size) -> EvalResult<'tcx> {
+ let overlapping_start = self.relocations(ptr, Size::from_bytes(0))?.count();
+ let overlapping_end = self.relocations(ptr.offset(size, self)?, Size::from_bytes(0))?.count();
if overlapping_start + overlapping_end != 0 {
return err!(ReadPointerAsBytes);
}
&mut self,
src: MemoryPointer,
dest: MemoryPointer,
- size: u64,
+ size: Size,
) -> EvalResult<'tcx> {
// The bits have to be saved locally before writing to dest in case src and dest overlap.
- assert_eq!(size as usize as u64, size);
- let mut v = Vec::with_capacity(size as usize);
- for i in 0..size {
- let defined = self.get(src.alloc_id)?.undef_mask.get(src.offset + i);
+ assert_eq!(size.bytes() as usize as u64, size.bytes());
+ let mut v = Vec::with_capacity(size.bytes() as usize);
+ for i in 0..size.bytes() {
+ let defined = self.get(src.alloc_id)?.undef_mask.get(src.offset + Size::from_bytes(i));
v.push(defined);
}
for (i, defined) in v.into_iter().enumerate() {
self.get_mut(dest.alloc_id)?.undef_mask.set(
dest.offset +
- i as u64,
+ Size::from_bytes(i as u64),
defined,
);
}
Ok(())
}
- fn check_defined(&self, ptr: MemoryPointer, size: u64) -> EvalResult<'tcx> {
+ fn check_defined(&self, ptr: MemoryPointer, size: Size) -> EvalResult<'tcx> {
let alloc = self.get(ptr.alloc_id)?;
if !alloc.undef_mask.is_range_defined(
ptr.offset,
pub fn mark_definedness(
&mut self,
ptr: Pointer,
- size: u64,
+ size: Size,
new_state: bool,
) -> EvalResult<'tcx> {
- if size == 0 {
+ if size.bytes() == 0 {
return Ok(());
}
let ptr = ptr.to_ptr()?;
use rustc::mir;
use rustc::ty::{self, Ty, TyCtxt};
-use rustc::ty::layout::{self, Align, LayoutOf, TyLayout};
+use rustc::ty::layout::{self, Align, LayoutOf, TyLayout, Size};
use rustc_data_structures::indexed_vec::Idx;
use rustc::mir::interpret::{GlobalId, Value, PrimVal, EvalResult, Pointer, MemoryPointer};
};
let alloc = Machine::init_static(self, cid)?;
Place::Ptr {
- ptr: MemoryPointer::new(alloc, 0).into(),
+ ptr: MemoryPointer::new(alloc, Size::from_bytes(0)).into(),
align: layout.align,
extra: PlaceExtra::None,
}
base_layout.ty,
base_ptr.to_value_with_vtable(tab),
)?;
- offset.abi_align(align).bytes()
+ offset.abi_align(align)
}
- _ => offset.bytes(),
+ _ => offset,
};
let ptr = base_ptr.offset(offset, &self)?;
let (base_ptr, align) = base.to_ptr_align();
let (elem_ty, len) = base.elem_ty_and_len(outer_ty, self.tcx.tcx);
- let elem_size = self.layout_of(elem_ty)?.size.bytes();
+ let elem_size = self.layout_of(elem_ty)?.size;
assert!(
n < len,
"Tried to access element {} of array/slice with length {}",
n,
len
);
- let ptr = base_ptr.offset(n * elem_size, &*self)?;
+ let ptr = base_ptr.offset(elem_size * n, &*self)?;
Ok(Place::Ptr {
ptr,
align,
let (base_ptr, align) = base.to_ptr_align();
let (elem_ty, n) = base.elem_ty_and_len(base_ty, self.tcx.tcx);
- let elem_size = self.layout_of(elem_ty)?.size.bytes();
+ let elem_size = self.layout_of(elem_ty)?.size;
assert!(n >= min_length as u64);
let index = if from_end {
u64::from(offset)
};
- let ptr = base_ptr.offset(index * elem_size, &self)?;
+ let ptr = base_ptr.offset(elem_size * index, &self)?;
Ok(Place::Ptr { ptr, align, extra: PlaceExtra::None })
}
let (base_ptr, align) = base.to_ptr_align();
let (elem_ty, n) = base.elem_ty_and_len(base_ty, self.tcx.tcx);
- let elem_size = self.layout_of(elem_ty)?.size.bytes();
+ let elem_size = self.layout_of(elem_ty)?.size;
assert!(u64::from(from) <= n - u64::from(to));
- let ptr = base_ptr.offset(u64::from(from) * elem_size, &self)?;
+ let ptr = base_ptr.offset(elem_size * u64::from(from), &self)?;
// sublicing arrays produces arrays
let extra = if self.type_is_sized(base_ty) {
PlaceExtra::None
Value::ByRef(ptr, align) => {
for (i, arg_local) in arg_locals.enumerate() {
let field = layout.field(&self, i)?;
- let offset = layout.fields.offset(i).bytes();
+ let offset = layout.fields.offset(i);
let arg = Value::ByRef(ptr.offset(offset, &self)?,
align.min(field.align));
let dest =
}
/// Size of a type in bytes.
-#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
+#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, RustcEncodable, RustcDecodable)]
pub struct Size {
raw: u64
}