if let Ok(instance) = self.resolve_path(path) {
let cid = GlobalId { instance, promoted: None };
// compute global if not cached
- let val = match self.globals.get(&cid).map(|&ptr| ptr) {
- Some(ptr) => self.value_to_primval(Value::by_ref(ptr.into()), usize)?.to_u64()?,
+ let val = match self.globals.get(&cid).cloned() {
+ Some(ptr) => self.value_to_primval(Value::ByRef(ptr), usize)?.to_u64()?,
None => eval_body_as_primval(self.tcx, instance)?.0.to_u64()?,
};
if val == name {
Lvalue, LvalueExtra,
PrimVal, PrimValKind, Value, Pointer,
HasMemory,
- EvalContext,
+ EvalContext, PtrAndAlign,
};
use helpers::EvalContextExt as HelperEvalContextExt;
let size = self.type_size(dest_ty)?.expect("cannot zero unsized value");
let init = |this: &mut Self, val: Value| {
let zero_val = match val {
- Value::ByRef { ptr, aligned } => {
+ Value::ByRef(PtrAndAlign { ptr, .. }) => {
// These writes have no alignment restriction anyway.
this.memory.write_repeat(ptr, 0, size)?;
- Value::ByRef { ptr, aligned }
+ val
},
// TODO(solson): Revisit this, it's fishy to check for Undef here.
Value::ByVal(PrimVal::Undef) => match this.ty_to_primval_kind(dest_ty) {
};
match dest {
Lvalue::Local { frame, local } => self.modify_local(frame, local, init)?,
- Lvalue::Ptr { ptr, extra: LvalueExtra::None, aligned: true } => self.memory.write_repeat(ptr, 0, size)?,
+ Lvalue::Ptr { ptr: PtrAndAlign { ptr, aligned: true }, extra: LvalueExtra::None } => self.memory.write_repeat(ptr, 0, size)?,
Lvalue::Ptr { .. } => bug!("init intrinsic tried to write to fat or unaligned ptr target"),
}
}
let size = dest_layout.size(&self.tcx.data_layout).bytes();
let uninit = |this: &mut Self, val: Value| {
match val {
- Value::ByRef { ptr, aligned } => {
+ Value::ByRef(PtrAndAlign { ptr, .. }) => {
this.memory.mark_definedness(ptr, size, false)?;
- Ok(Value::ByRef { ptr, aligned })
+ Ok(val)
},
_ => Ok(Value::ByVal(PrimVal::Undef)),
}
};
match dest {
Lvalue::Local { frame, local } => self.modify_local(frame, local, uninit)?,
- Lvalue::Ptr { ptr, extra: LvalueExtra::None, aligned: true } =>
+ Lvalue::Ptr { ptr: PtrAndAlign { ptr, aligned: true }, extra: LvalueExtra::None } =>
self.memory.mark_definedness(ptr, size, false)?,
Lvalue::Ptr { .. } => bug!("uninit intrinsic tried to write to fat or unaligned ptr target"),
}
EvalResult, EvalError, EvalErrorKind,
GlobalId, Lvalue, Value,
PrimVal,
- EvalContext, StackPopCleanup,
+ EvalContext, StackPopCleanup, PtrAndAlign,
Kind,
};
let size = ecx.type_size_with_substs(mir.return_ty, instance.substs)?.expect("unsized global");
let align = ecx.type_align_with_substs(mir.return_ty, instance.substs)?;
let ptr = ecx.memory.allocate(size, align, Kind::UninitializedStatic)?;
- ecx.globals.insert(cid, ptr);
+ let aligned = !ecx.is_packed(mir.return_ty)?;
+ ecx.globals.insert(cid, PtrAndAlign { ptr: ptr.into(), aligned });
let mutable = !mir.return_ty.is_freeze(
ecx.tcx,
ty::ParamEnv::empty(Reveal::All),
while ecx.step()? {}
}
- let value = Value::by_ref(ecx.globals.get(&cid).expect("global not cached").into());
+ let value = Value::ByRef(*ecx.globals.get(&cid).expect("global not cached"));
Ok((ecx.value_to_primval(value, mir.return_ty)?, mir.return_ty))
}
use rustc::middle::region::CodeExtent;
use rustc::mir;
use rustc::traits::Reveal;
-use rustc::ty::layout::{self, Layout, Size, Align};
+use rustc::ty::layout::{self, Layout, Size, Align, HasDataLayout};
use rustc::ty::subst::{Subst, Substs, Kind};
use rustc::ty::{self, Ty, TyCtxt, TypeFoldable, Binder};
use rustc::traits;
pub(crate) suspended: HashMap<DynamicLifetime, Vec<ValidationQuery<'tcx>>>,
/// Precomputed statics, constants and promoteds.
- pub globals: HashMap<GlobalId<'tcx>, MemoryPointer>,
+ pub globals: HashMap<GlobalId<'tcx>, PtrAndAlign>,
/// The virtual call stack.
pub(crate) stack: Vec<Frame<'tcx>>,
pub packed: bool,
}
+/// A pointer paired with the alignment assumption under which it may be
+/// used. Fields of `#[repr(packed)]` types yield pointers that are *not*
+/// guaranteed to have the type's natural alignment, and every later
+/// access needs to remember that.
+#[derive(Copy, Clone, Debug)]
+pub struct PtrAndAlign {
+    pub ptr: Pointer,
+    /// Remember whether this lvalue is *supposed* to be aligned.
+    pub aligned: bool,
+}
+
+impl PtrAndAlign {
+    /// Converts the inner `Pointer` into a concrete `MemoryPointer`
+    /// (fallible; delegates to `Pointer::to_ptr`).
+    pub fn to_ptr<'tcx>(self) -> EvalResult<'tcx, MemoryPointer> {
+        self.ptr.to_ptr()
+    }
+    /// Byte-offsets the pointer by `i`, carrying the alignment flag along
+    /// unchanged.
+    pub fn offset<'tcx, C: HasDataLayout>(self, i: u64, cx: C) -> EvalResult<'tcx, Self> {
+        Ok(PtrAndAlign {
+            ptr: self.ptr.offset(i, cx)?,
+            aligned: self.aligned,
+        })
+    }
+}
+
+
impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
pub fn new(
tcx: TyCtxt<'a, 'tcx, 'tcx>,
}
pub fn deallocate_local(&mut self, local: Option<Value>) -> EvalResult<'tcx> {
- if let Some(Value::ByRef { ptr, aligned: _ }) = local {
+ if let Some(Value::ByRef(ptr)) = local {
trace!("deallocating local");
let ptr = ptr.to_ptr()?;
self.memory.dump_alloc(ptr.alloc_id);
self.memory.write_uint(discr_dest, discr_val, discr_size)?;
let dest = Lvalue::Ptr {
- ptr: dest_ptr.into(),
+ ptr: PtrAndAlign {
+ ptr: dest_ptr.into(),
+ aligned: true,
+ },
extra: LvalueExtra::DowncastVariant(variant_idx),
- aligned: true,
};
self.assign_fields(dest, dest_ty, operands)
self.inc_step_counter_and_check_limit(operands.len() as u64)?;
use rustc::ty::layout::Layout::*;
match *dest_layout {
- Univariant { .. } | Array { .. } => {
+ Univariant { ref variant, .. } => {
+ self.write_maybe_aligned_mut(!variant.packed, |ecx| {
+ ecx.assign_fields(dest, dest_ty, operands)
+ })?;
+ }
+
+ Array { .. } => {
self.assign_fields(dest, dest_ty, operands)?;
}
}
}
- StructWrappedNullablePointer { nndiscr, ref discrfield_source, .. } => {
+ StructWrappedNullablePointer { nndiscr, ref discrfield_source, ref nonnull, .. } => {
if let mir::AggregateKind::Adt(_, variant, _, _) = **kind {
if nndiscr == variant as u64 {
- self.assign_fields(dest, dest_ty, operands)?;
+ self.write_maybe_aligned_mut(!nonnull.packed, |ecx| {
+ ecx.assign_fields(dest, dest_ty, operands)
+ })?;
} else {
for operand in operands {
let operand_ty = self.operand_ty(operand);
let dest = dest.offset(offset.bytes(), &self)?;
let dest_size = self.type_size(ty)?
.expect("bad StructWrappedNullablePointer discrfield");
- self.memory.write_int(dest, 0, dest_size)?;
+ self.memory.write_maybe_aligned_mut(!nonnull.packed, |mem| {
+ mem.write_int(dest, 0, dest_size)
+ })?;
}
} else {
bug!("tried to assign {:?} to Layout::RawNullablePointer", kind);
self.assign_fields(dest, dest_ty, operands)?;
}
- UntaggedUnion { .. } => {
+ UntaggedUnion { ref variants } => {
assert_eq!(operands.len(), 1);
let operand = &operands[0];
let value = self.eval_operand(operand)?;
let value_ty = self.operand_ty(operand);
- self.write_value(value, dest, value_ty)?;
+ self.write_maybe_aligned_mut(!variants.packed, |ecx| {
+ ecx.write_value(value, dest, value_ty)
+ })?;
}
_ => {
let src = self.eval_lvalue(lvalue)?;
// We ignore the alignment of the lvalue here -- special handling for packed structs ends
// at the `&` operator.
- let (ptr, extra, _aligned) = self.force_allocation(src)?.to_ptr_extra_aligned();
+ let (ptr, extra) = self.force_allocation(src)?.to_ptr_extra_aligned();
let val = match extra {
- LvalueExtra::None => ptr.to_value(),
- LvalueExtra::Length(len) => ptr.to_value_with_len(len),
- LvalueExtra::Vtable(vtable) => ptr.to_value_with_vtable(vtable),
+ LvalueExtra::None => ptr.ptr.to_value(),
+ LvalueExtra::Length(len) => ptr.ptr.to_value_with_len(len),
+ LvalueExtra::Vtable(vtable) => ptr.ptr.to_value_with_vtable(vtable),
LvalueExtra::DowncastVariant(..) =>
bug!("attempted to take a reference to an enum downcast lvalue"),
};
Literal::Item { def_id, substs } => {
let instance = self.resolve_associated_const(def_id, substs);
let cid = GlobalId { instance, promoted: None };
- Value::by_ref(self.globals.get(&cid).expect("static/const not cached").into())
+ Value::ByRef(*self.globals.get(&cid).expect("static/const not cached"))
}
Literal::Promoted { index } => {
instance: self.frame().instance,
promoted: Some(index),
};
- Value::by_ref(self.globals.get(&cid).expect("promoted not cached").into())
+ Value::ByRef(*self.globals.get(&cid).expect("promoted not cached"))
}
};
}
}
+    /// Looks up a cached global by id and wraps its pointer — together with
+    /// its recorded alignment — as a by-ref `Value`.
+    ///
+    /// Panics if the global has not been computed and inserted into
+    /// `self.globals` yet.
+    pub fn read_global_as_value(&self, gid: GlobalId) -> Value {
+        Value::ByRef(*self.globals.get(&gid).expect("global not cached"))
+    }
+
+
pub fn operand_ty(&self, operand: &mir::Operand<'tcx>) -> Ty<'tcx> {
self.monomorphize(operand.ty(self.mir(), self.tcx), self.substs())
}
Ok(())
}
+    /// Returns whether `ty`'s layout is `#[repr(packed)]`, i.e. whether
+    /// values of this type may be stored with less than their natural
+    /// alignment. Used to decide the `aligned` flag when caching a global's
+    /// `PtrAndAlign`.
+    pub fn is_packed(&self, ty: Ty<'tcx>) -> EvalResult<'tcx, bool> {
+        let layout = self.type_layout(ty)?;
+        use rustc::ty::layout::Layout::*;
+        Ok(match *layout {
+            Univariant { ref variant, .. } => variant.packed,
+
+            StructWrappedNullablePointer { ref nonnull, .. } => nonnull.packed,
+
+            UntaggedUnion { ref variants } => variants.packed,
+
+            // can only apply #[repr(packed)] to struct and union
+            _ => false,
+        })
+    }
+
+
pub fn force_allocation(
&mut self,
lvalue: Lvalue,
// -1 since we don't store the return value
match self.stack[frame].locals[local.index() - 1] {
None => return err!(DeadLocal),
- Some(Value::ByRef { ptr, aligned }) => {
- Lvalue::Ptr { ptr, aligned, extra: LvalueExtra::None }
+ Some(Value::ByRef(ptr)) => {
+ Lvalue::Ptr { ptr, extra: LvalueExtra::None }
},
Some(val) => {
let ty = self.stack[frame].mir.local_decls[local].ty;
/// ensures this Value is not a ByRef
pub(super) fn follow_by_ref_value(&mut self, value: Value, ty: Ty<'tcx>) -> EvalResult<'tcx, Value> {
match value {
- Value::ByRef { ptr, aligned } => {
+ Value::ByRef(PtrAndAlign { ptr, aligned }) => {
self.read_maybe_aligned(aligned, |ectx| ectx.read_value(ptr, ty))
}
other => Ok(other),
// correct if we never look at this data with the wrong type.
match dest {
- Lvalue::Ptr { ptr, extra, aligned } => {
+ Lvalue::Ptr { ptr: PtrAndAlign { ptr, aligned }, extra } => {
assert_eq!(extra, LvalueExtra::None);
self.write_maybe_aligned_mut(aligned,
|ectx| ectx.write_value_to_ptr(src_val, ptr, dest_ty))
old_dest_val: Value,
dest_ty: Ty<'tcx>,
) -> EvalResult<'tcx> {
- if let Value::ByRef { ptr: dest_ptr, aligned } = old_dest_val {
+ if let Value::ByRef(PtrAndAlign { ptr: dest_ptr, aligned }) = old_dest_val {
// If the value is already `ByRef` (that is, backed by an `Allocation`),
// then we must write the new value into this allocation, because there may be
// other pointers into the allocation. These other pointers are logically
self.write_maybe_aligned_mut(aligned,
|ectx| ectx.write_value_to_ptr(src_val, dest_ptr, dest_ty))?;
- } else if let Value::ByRef { ptr: src_ptr, aligned } = src_val {
+ } else if let Value::ByRef(PtrAndAlign { ptr: src_ptr, aligned }) = src_val {
// If the value is not `ByRef`, then we know there are no pointers to it
// and we can simply overwrite the `Value` in the locals array directly.
//
dest_ty: Ty<'tcx>,
) -> EvalResult<'tcx> {
match value {
- Value::ByRef { ptr, aligned } => {
+ Value::ByRef(PtrAndAlign { ptr, aligned }) => {
self.read_maybe_aligned_mut(aligned, |ectx| ectx.copy(ptr, dest, dest_ty))
},
Value::ByVal(primval) => {
//let src = adt::MaybeSizedValue::sized(src);
//let dst = adt::MaybeSizedValue::sized(dst);
let src_ptr = match src {
- Value::ByRef { ptr, aligned: true } => ptr,
+ Value::ByRef(PtrAndAlign { ptr, aligned: true }) => ptr,
// TODO: Is it possible for unaligned pointers to occur here?
_ => bug!("expected aligned pointer, got {:?}", src),
};
Err(err) => {
panic!("Failed to access local: {:?}", err);
}
- Ok(Value::ByRef { ptr, aligned }) => match ptr.into_inner_primval() {
+ Ok(Value::ByRef(PtrAndAlign{ ptr, aligned })) => match ptr.into_inner_primval() {
PrimVal::Ptr(ptr) => {
write!(msg, " by {}ref:", if aligned { "" } else { "unaligned " }).unwrap();
allocs.push(ptr.alloc_id);
MemoryPointer,
PrimVal, Value, Pointer,
Machine,
+ PtrAndAlign,
};
#[derive(Copy, Clone, Debug)]
/// An lvalue may have an invalid (integral or undef) pointer,
/// since it might be turned back into a reference
/// before ever being dereferenced.
- ptr: Pointer,
+ ptr: PtrAndAlign,
extra: LvalueExtra,
- /// Remember whether this lvalue is *supposed* to be aligned.
- aligned: bool,
},
/// An lvalue referring to a value on the stack. Represented by a stack frame index paired with
}
pub fn from_primval_ptr(ptr: Pointer) -> Self {
- Lvalue::Ptr { ptr, extra: LvalueExtra::None, aligned: true }
+ Lvalue::Ptr { ptr: PtrAndAlign { ptr, aligned: true }, extra: LvalueExtra::None }
}
pub fn from_ptr(ptr: MemoryPointer) -> Self {
Self::from_primval_ptr(ptr.into())
}
- pub(super) fn to_ptr_extra_aligned(self) -> (Pointer, LvalueExtra, bool) {
+ pub(super) fn to_ptr_extra_aligned(self) -> (PtrAndAlign, LvalueExtra) {
match self {
- Lvalue::Ptr { ptr, extra, aligned } => (ptr, extra, aligned),
+ Lvalue::Ptr { ptr, extra } => (ptr, extra),
_ => bug!("to_ptr_and_extra: expected Lvalue::Ptr, got {:?}", self),
}
}
pub fn to_ptr(self) -> EvalResult<'tcx, MemoryPointer> {
- let (ptr, extra, _aligned) = self.to_ptr_extra_aligned();
+ let (ptr, extra) = self.to_ptr_extra_aligned();
// At this point, we forget about the alignment information -- the lvalue has been turned into a reference,
// and no matter where it came from, it now must be aligned.
assert_eq!(extra, LvalueExtra::None);
Static(ref static_) => {
let instance = ty::Instance::mono(self.tcx, static_.def_id);
let cid = GlobalId { instance, promoted: None };
- Ok(Some(Value::by_ref(self.globals.get(&cid).expect("global not cached").into())))
+ Ok(Some(Value::ByRef(*self.globals.get(&cid).expect("global not cached"))))
},
Projection(ref proj) => self.try_read_lvalue_projection(proj),
}
pub fn read_lvalue(&self, lvalue: Lvalue) -> EvalResult<'tcx, Value> {
match lvalue {
- Lvalue::Ptr { ptr, extra, aligned } => {
+ Lvalue::Ptr { ptr, extra } => {
assert_eq!(extra, LvalueExtra::None);
- Ok(Value::ByRef { ptr, aligned })
+ Ok(Value::ByRef(ptr))
}
Lvalue::Local { frame, local } => {
self.stack[frame].get_local(local)
Static(ref static_) => {
let instance = ty::Instance::mono(self.tcx, static_.def_id);
let gid = GlobalId { instance, promoted: None };
- Lvalue::from_ptr(*self.globals.get(&gid).expect("uncached global"))
+ Lvalue::Ptr {
+ ptr: *self.globals.get(&gid).expect("uncached global"),
+ extra: LvalueExtra::None,
+ }
}
Projection(ref proj) => {
},
General { ref variants, .. } => {
- let (_, base_extra, _) = base.to_ptr_extra_aligned();
+ let (_, base_extra) = base.to_ptr_extra_aligned();
if let LvalueExtra::DowncastVariant(variant_idx) = base_extra {
// +1 for the discriminant, which is field 0
- (variants[variant_idx].offsets[field_index + 1], variants[variant_idx].packed)
+ assert!(!variants[variant_idx].packed);
+ (variants[variant_idx].offsets[field_index + 1], false)
} else {
bug!("field access on enum had no variant index");
}
};
// Do not allocate in trivial cases
- let (base_ptr, base_extra, aligned) = match base {
- Lvalue::Ptr { ptr, extra, aligned } => (ptr, extra, aligned),
+ let (base_ptr, base_extra) = match base {
+ Lvalue::Ptr { ptr, extra } => (ptr, extra),
Lvalue::Local { frame, local } => match self.stack[frame].get_local(local)? {
// in case the type has a single field, just return the value
Value::ByVal(_) if self.get_field_count(base_ty).map(|c| c == 1).unwrap_or(false) => {
let offset = match base_extra {
LvalueExtra::Vtable(tab) => {
- let (_, align) = self.size_and_align_of_dst(base_ty, base_ptr.to_value_with_vtable(tab))?;
+ let (_, align) = self.size_and_align_of_dst(base_ty, base_ptr.ptr.to_value_with_vtable(tab))?;
offset.abi_align(Align::from_bytes(align, align).unwrap()).bytes()
}
_ => offset.bytes(),
};
- let ptr = base_ptr.offset(offset, &self)?;
+ let mut ptr = base_ptr.offset(offset, &self)?;
+ // if we were unaligned, stay unaligned
+ // no matter what we were, if we are packed, we must not be aligned anymore
+ ptr.aligned &= !packed;
let field_ty = self.monomorphize(field_ty, self.substs());
base_extra
};
- Ok(Lvalue::Ptr { ptr, extra, aligned: aligned && !packed })
+ Ok(Lvalue::Ptr { ptr, extra } )
}
pub(super) fn val_to_lvalue(&self, val: Value, ty: Ty<'tcx>) -> EvalResult<'tcx, Lvalue> {
Ok(match self.tcx.struct_tail(ty).sty {
ty::TyDynamic(..) => {
let (ptr, vtable) = val.into_ptr_vtable_pair(&self.memory)?;
- Lvalue::Ptr { ptr, extra: LvalueExtra::Vtable(vtable), aligned: true }
+ Lvalue::Ptr { ptr: PtrAndAlign { ptr, aligned: true }, extra: LvalueExtra::Vtable(vtable) }
},
ty::TyStr | ty::TySlice(_) => {
let (ptr, len) = val.into_slice(&self.memory)?;
- Lvalue::Ptr { ptr, extra: LvalueExtra::Length(len), aligned: true }
+ Lvalue::Ptr { ptr: PtrAndAlign { ptr, aligned: true }, extra: LvalueExtra::Length(len) }
},
- _ => Lvalue::Ptr { ptr: val.into_ptr(&self.memory)?, extra: LvalueExtra::None, aligned: true },
+ _ => Lvalue::from_primval_ptr(val.into_ptr(&self.memory)?),
})
}
pub(super) fn lvalue_index(&mut self, base: Lvalue, outer_ty: Ty<'tcx>, n: u64) -> EvalResult<'tcx, Lvalue> {
// Taking the outer type here may seem odd; it's needed because for array types, the outer type gives away the length.
let base = self.force_allocation(base)?;
- let (base_ptr, _, aligned) = base.to_ptr_extra_aligned();
+ let (base_ptr, _) = base.to_ptr_extra_aligned();
let (elem_ty, len) = base.elem_ty_and_len(outer_ty);
let elem_size = self.type_size(elem_ty)?.expect("slice element must be sized");
assert!(n < len, "Tried to access element {} of array/slice with length {}", n, len);
let ptr = base_ptr.offset(n * elem_size, self.memory.layout)?;
- Ok(Lvalue::Ptr { ptr, extra: LvalueExtra::None, aligned })
+ Ok(Lvalue::Ptr { ptr, extra: LvalueExtra::None })
}
pub(super) fn eval_lvalue_projection(
proj_elem: &mir::ProjectionElem<'tcx, mir::Operand<'tcx>, Ty<'tcx>>,
) -> EvalResult<'tcx, Lvalue> {
use rustc::mir::ProjectionElem::*;
- let (ptr, extra, aligned) = match *proj_elem {
+ let (ptr, extra) = match *proj_elem {
Field(field, field_ty) => {
return self.lvalue_field(base, field.index(), base_ty, field_ty);
}
let base_layout = self.type_layout(base_ty)?;
// FIXME(solson)
let base = self.force_allocation(base)?;
- let (base_ptr, base_extra, aligned) = base.to_ptr_extra_aligned();
+ let (base_ptr, base_extra) = base.to_ptr_extra_aligned();
use rustc::ty::layout::Layout::*;
let extra = match *base_layout {
RawNullablePointer { .. } | StructWrappedNullablePointer { .. } => base_extra,
_ => bug!("variant downcast on non-aggregate: {:?}", base_layout),
};
- (base_ptr, extra, aligned)
+ (base_ptr, extra)
}
Deref => {
ConstantIndex { offset, min_length, from_end } => {
// FIXME(solson)
let base = self.force_allocation(base)?;
- let (base_ptr, _, aligned) = base.to_ptr_extra_aligned();
+ let (base_ptr, _) = base.to_ptr_extra_aligned();
let (elem_ty, n) = base.elem_ty_and_len(base_ty);
let elem_size = self.type_size(elem_ty)?.expect("sequence element must be sized");
};
let ptr = base_ptr.offset(index * elem_size, &self)?;
- (ptr, LvalueExtra::None, aligned)
+ (ptr, LvalueExtra::None)
}
Subslice { from, to } => {
// FIXME(solson)
let base = self.force_allocation(base)?;
- let (base_ptr, _, aligned) = base.to_ptr_extra_aligned();
+ let (base_ptr, _) = base.to_ptr_extra_aligned();
let (elem_ty, n) = base.elem_ty_and_len(base_ty);
let elem_size = self.type_size(elem_ty)?.expect("slice element must be sized");
assert!(u64::from(from) <= n - u64::from(to));
let ptr = base_ptr.offset(u64::from(from) * elem_size, &self)?;
let extra = LvalueExtra::Length(n - u64::from(to) - u64::from(from));
- (ptr, extra, aligned)
+ (ptr, extra)
}
};
- Ok(Lvalue::Ptr { ptr, extra, aligned })
+ Ok(Lvalue::Ptr { ptr, extra })
}
pub(super) fn lvalue_ty(&self, lvalue: &mir::Lvalue<'tcx>) -> Ty<'tcx> {
StackPopCleanup,
DynamicLifetime,
TyAndPacked,
+ PtrAndAlign,
};
pub use self::lvalue::{
use super::{
EvalResult,
- EvalContext, StackPopCleanup, TyAndPacked,
+ EvalContext, StackPopCleanup, TyAndPacked, PtrAndAlign,
GlobalId, Lvalue,
HasMemory, Kind,
Machine,
let ptr = self.memory.allocate(ptr_size, ptr_size, Kind::UninitializedStatic)?;
self.memory.write_usize(ptr, 0)?;
self.memory.mark_static_initalized(ptr.alloc_id, mutability)?;
- self.globals.insert(cid, ptr);
+ self.globals.insert(cid, PtrAndAlign { ptr: ptr.into(), aligned: true });
return Ok(false);
}
let mir = self.load_mir(instance.def)?;
let size = self.type_size_with_substs(mir.return_ty, substs)?.expect("unsized global");
let align = self.type_align_with_substs(mir.return_ty, substs)?;
let ptr = self.memory.allocate(size, align, Kind::UninitializedStatic)?;
- self.globals.insert(cid, ptr);
+ let aligned = !self.is_packed(mir.return_ty)?;
+ self.globals.insert(cid, PtrAndAlign { ptr: ptr.into(), aligned });
let internally_mutable = !mir.return_ty.is_freeze(
self.tcx,
ty::ParamEnv::empty(Reveal::All),
let size = this.ecx.type_size_with_substs(mir.return_ty, this.instance.substs)?.expect("unsized global");
let align = this.ecx.type_align_with_substs(mir.return_ty, this.instance.substs)?;
let ptr = this.ecx.memory.allocate(size, align, Kind::UninitializedStatic)?;
- this.ecx.globals.insert(cid, ptr);
+ let aligned = !this.ecx.is_packed(mir.return_ty)?;
+ this.ecx.globals.insert(cid, PtrAndAlign { ptr: ptr.into(), aligned });
trace!("pushing stack frame for {:?}", index);
this.ecx.push_stack_frame(this.instance,
constant.span,
// However, unaligned accesses will probably make the actual drop implementation fail -- a problem shared
// by rustc.
let val = match self.force_allocation(lval)? {
- Lvalue::Ptr { ptr, extra: LvalueExtra::Vtable(vtable), aligned: _ } => ptr.to_value_with_vtable(vtable),
- Lvalue::Ptr { ptr, extra: LvalueExtra::Length(len), aligned: _ } => ptr.to_value_with_len(len),
- Lvalue::Ptr { ptr, extra: LvalueExtra::None, aligned: _ } => ptr.to_value(),
+ Lvalue::Ptr { ptr, extra: LvalueExtra::Vtable(vtable) } => ptr.ptr.to_value_with_vtable(vtable),
+ Lvalue::Ptr { ptr, extra: LvalueExtra::Length(len) } => ptr.ptr.to_value_with_len(len),
+ Lvalue::Ptr { ptr, extra: LvalueExtra::None } => ptr.ptr.to_value(),
_ => bug!("force_allocation broken"),
};
self.drop(val, instance, ty, span)
use super::{
EvalError, EvalResult, EvalErrorKind,
- EvalContext, eval_context, TyAndPacked,
+ EvalContext, eval_context, TyAndPacked, PtrAndAlign,
Lvalue,
MemoryPointer,
PrimVal, Value,
if self.frame().mir.args_iter().count() == fields.len() + 1 {
let offsets = variant.offsets.iter().map(|s| s.bytes());
match arg_val {
- Value::ByRef { ptr, aligned } => {
+ Value::ByRef(PtrAndAlign { ptr, aligned }) => {
assert!(aligned, "Unaligned ByRef-values cannot occur as function arguments");
for ((offset, ty), arg_local) in offsets.zip(fields).zip(arg_locals) {
- let arg = Value::ByRef { ptr: ptr.offset(offset, &self)?, aligned: true};
+ let arg = Value::by_ref(ptr.offset(offset, &self)?);
let dest = self.eval_lvalue(&mir::Lvalue::Local(arg_local))?;
trace!("writing arg {:?} to {:?} (type: {})", arg, dest, ty);
self.write_value(arg, dest, ty)?;
};
if is_owning {
match query.lval {
- Lvalue::Ptr { ptr, extra, aligned: _ } => {
+ Lvalue::Ptr { ptr, extra } => {
// Determine the size
// FIXME: Can we reuse size_and_align_of_dst for Lvalues?
let len = match self.type_size(query.ty)? {
EvalResult,
Memory, MemoryPointer, HasMemory, PointerArithmetic,
Machine,
+ PtrAndAlign,
};
pub(super) fn bytes_to_f32(bytes: u128) -> f32 {
/// operations and fat pointers. This idea was taken from rustc's trans.
#[derive(Clone, Copy, Debug)]
pub enum Value {
- ByRef { ptr: Pointer, aligned: bool},
+ ByRef(PtrAndAlign),
ByVal(PrimVal),
ByValPair(PrimVal, PrimVal),
}
}
}
-impl<'a> ::std::convert::From<&'a MemoryPointer> for Pointer {
- fn from(ptr: &'a MemoryPointer) -> Self {
- PrimVal::Ptr(*ptr).into()
- }
-}
-
/// A `PrimVal` represents an immediate, primitive value existing outside of a
/// `memory::Allocation`. It is in many ways like a small chunk of a `Allocation`, up to 8 bytes in
/// size. Like a range of bytes in an `Allocation`, a `PrimVal` can either represent the raw bytes
impl<'a, 'tcx: 'a> Value {
#[inline]
pub fn by_ref(ptr: Pointer) -> Self {
- Value::ByRef { ptr, aligned: true }
+ Value::ByRef(PtrAndAlign { ptr, aligned: true })
}
/// Convert the value into a pointer (or a pointer-sized integer). If the value is a ByRef,
pub fn into_ptr<M: Machine<'tcx>>(&self, mem: &Memory<'a, 'tcx, M>) -> EvalResult<'tcx, Pointer> {
use self::Value::*;
match *self {
- ByRef { ptr, aligned } => {
+ ByRef(PtrAndAlign { ptr, aligned }) => {
mem.read_maybe_aligned(aligned, |mem| mem.read_ptr(ptr.to_ptr()?) )
},
ByVal(ptr) | ByValPair(ptr, _) => Ok(ptr.into()),
) -> EvalResult<'tcx, (Pointer, MemoryPointer)> {
use self::Value::*;
match *self {
- ByRef { ptr: ref_ptr, aligned } => {
+ ByRef(PtrAndAlign { ptr: ref_ptr, aligned }) => {
mem.read_maybe_aligned(aligned, |mem| {
let ptr = mem.read_ptr(ref_ptr.to_ptr()?)?;
let vtable = mem.read_ptr(ref_ptr.offset(mem.pointer_size(), mem.layout)?.to_ptr()?)?;
pub(super) fn into_slice<M: Machine<'tcx>>(&self, mem: &Memory<'a, 'tcx, M>) -> EvalResult<'tcx, (Pointer, u64)> {
use self::Value::*;
match *self {
- ByRef { ptr: ref_ptr, aligned } => {
+ ByRef(PtrAndAlign { ptr: ref_ptr, aligned } ) => {
mem.read_maybe_aligned(aligned, |mem| {
let ptr = mem.read_ptr(ref_ptr.to_ptr()?)?;
let len = mem.read_usize(ref_ptr.offset(mem.pointer_size(), mem.layout)?.to_ptr()?)?;
--- /dev/null
+// Regression test: reading a field out of a packed static must work even
+// though the field may not have `i32`'s natural alignment.
+#[repr(packed)]
+struct Foo {
+    i: i32
+}
+
+fn main() {
+    // The braces force `FOO.i` to be copied out by value, presumably to
+    // avoid creating a reference to a possibly unaligned field.
+    assert_eq!({FOO.i}, 42);
+}
+
+static FOO: Foo = Foo { i: 42 };