-use super::{FunctionCx, LocalRef};
use super::operand::OperandValue;
+use super::{FunctionCx, LocalRef};
-use crate::MemFlags;
use crate::common::IntPredicate;
use crate::glue;
use crate::traits::*;
+use crate::MemFlags;
-use rustc::ty::{self, Instance, Ty};
-use rustc::ty::layout::{self, Align, TyLayout, LayoutOf, VariantIdx, HasTyCtxt};
use rustc::mir;
use rustc::mir::tcx::PlaceTy;
+use rustc::ty::layout::{self, Align, HasTyCtxt, LayoutOf, TyLayout, VariantIdx};
+use rustc::ty::{self, Ty};
#[derive(Copy, Clone, Debug)]
pub struct PlaceRef<'tcx, V> {
/// A pointer to the contents of the place.
pub llval: V,
/// This place's extra data if it is unsized, or `None` if null.
pub llextra: Option<V>,
/// The monomorphized type of this place, including variant information.
pub layout: TyLayout<'tcx>,
/// The alignment we know for this place.
pub align: Align,
}
impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
- pub fn new_sized(
- llval: V,
- layout: TyLayout<'tcx>,
- ) -> PlaceRef<'tcx, V> {
+ pub fn new_sized(llval: V, layout: TyLayout<'tcx>) -> PlaceRef<'tcx, V> {
assert!(!layout.is_unsized());
- PlaceRef {
- llval,
- llextra: None,
- layout,
- align: layout.align.abi
- }
+ PlaceRef { llval, llextra: None, layout, align: layout.align.abi }
}
- pub fn new_sized_aligned(
- llval: V,
- layout: TyLayout<'tcx>,
- align: Align,
- ) -> PlaceRef<'tcx, V> {
+ pub fn new_sized_aligned(llval: V, layout: TyLayout<'tcx>, align: Align) -> PlaceRef<'tcx, V> {
assert!(!layout.is_unsized());
- PlaceRef {
- llval,
- llextra: None,
- layout,
- align
- }
+ PlaceRef { llval, llextra: None, layout, align }
}
fn new_thin_place<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
bx: &mut Bx,
llval: V,
layout: TyLayout<'tcx>,
) -> PlaceRef<'tcx, V> {
assert!(!bx.cx().type_has_metadata(layout.ty));
- PlaceRef {
- llval,
- llextra: None,
- layout,
- align: layout.align.abi
- }
+ PlaceRef { llval, llextra: None, layout, align: layout.align.abi }
}
// FIXME(eddyb) pass something else for the name so no work is done
// unless LLVM IR names are turned on (e.g. for `--emit=llvm-ir`).
Self::alloca(bx, ptr_layout)
}
- pub fn len<Cx: ConstMethods<'tcx, Value = V>>(
- &self,
- cx: &Cx
- ) -> V {
+ pub fn len<Cx: ConstMethods<'tcx, Value = V>>(&self, cx: &Cx) -> V {
if let layout::FieldPlacement::Array { count, .. } = self.layout.fields {
if self.layout.is_unsized() {
assert_eq!(count, 0);
impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
/// Access a field, at a point when the value's case is known.
pub fn project_field<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
- self, bx: &mut Bx,
+ self,
+ bx: &mut Bx,
ix: usize,
) -> Self {
let field = self.layout.field(bx.cx(), ix);
PlaceRef {
// HACK(eddyb): have to bitcast pointers until LLVM removes pointee types.
llval: bx.pointercast(llval, bx.cx().type_ptr_to(bx.cx().backend_type(field))),
- llextra: if bx.cx().type_has_metadata(field.ty) {
- self.llextra
- } else {
- None
- },
+ llextra: if bx.cx().type_has_metadata(field.ty) { self.llextra } else { None },
layout: field,
align: effective_field_align,
}
// Simple cases, which don't need DST adjustment:
// * no metadata available - just log the case
// * known alignment - sized types, `[T]`, `str` or a foreign type
// * packed struct - there is no alignment padding
match field.ty.kind {
_ if self.llextra.is_none() => {
- debug!("unsized field `{}`, of `{:?}` has no metadata for adjustment",
- ix, self.llval);
+ debug!(
+ "unsized field `{}`, of `{:?}` has no metadata for adjustment",
+ ix, self.llval
+ );
return simple();
}
_ if !field.is_unsized() => return simple(),
pub fn codegen_get_discr<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
self,
bx: &mut Bx,
- cast_to: Ty<'tcx>
+ cast_to: Ty<'tcx>,
) -> V {
let cast_to = bx.cx().immediate_backend_type(bx.cx().layout_of(cast_to));
if self.layout.abi.is_uninhabited() {
}
let (discr_scalar, discr_kind, discr_index) = match self.layout.variants {
layout::Variants::Single { index } => {
- let discr_val = self.layout.ty.discriminant_for_variant(bx.cx().tcx(), index)
+ let discr_val = self
+ .layout
+ .ty
+ .discriminant_for_variant(bx.cx().tcx(), index)
.map_or(index.as_u32() as u128, |discr| discr.val);
return bx.cx().const_uint_big(cast_to, discr_val);
}
// We use `i1` for bytes that are always `0` or `1`,
// e.g., `#[repr(i8)] enum E { A, B }`, but we can't
// let LLVM interpret the `i1` as signed, because
// then `i1 1` (i.e., `E::B`) is effectively `i8 -1`.
layout::Int(_, signed) => !discr_scalar.is_bool() && signed,
- _ => false
+ _ => false,
};
bx.intcast(encoded_discr.immediate(), cast_to, signed)
}
pub fn codegen_set_discr<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
&self,
bx: &mut Bx,
- variant_index: VariantIdx
+ variant_index: VariantIdx,
) {
if self.layout.for_variant(bx.cx(), variant_index).abi.is_uninhabited() {
// We play it safe by using a well-defined `abort`, but we could go for immediate UB
// if that turns out to be helpful.
bx.store(
bx.cx().const_uint_big(bx.cx().backend_type(ptr.layout), to),
ptr.llval,
- ptr.align);
+ ptr.align,
+ );
}
layout::Variants::Multiple {
- discr_kind: layout::DiscriminantKind::Niche {
- dataful_variant,
- ref niche_variants,
- niche_start,
- },
+ discr_kind:
+ layout::DiscriminantKind::Niche { dataful_variant, ref niche_variants, niche_start },
discr_index,
..
} => {
if variant_index != dataful_variant {
- if bx.cx().sess().target.target.arch == "arm" ||
- bx.cx().sess().target.target.arch == "aarch64" {
+ if bx.cx().sess().target.target.arch == "arm"
+ || bx.cx().sess().target.target.arch == "aarch64"
+ {
// FIXME(#34427): as workaround for LLVM bug on ARM,
// use memset of 0 before assigning niche value.
let fill_byte = bx.cx().const_u8(0);
let niche = self.project_field(bx, discr_index);
let niche_llty = bx.cx().immediate_backend_type(niche.layout);
let niche_value = variant_index.as_u32() - niche_variants.start().as_u32();
- let niche_value = (niche_value as u128)
- .wrapping_add(niche_start);
+ let niche_value = (niche_value as u128).wrapping_add(niche_start);
// FIXME(eddyb): check the actual primitive type here.
let niche_llval = if niche_value == 0 {
// HACK(eddyb): using `c_null` as it works on all types.
pub fn project_index<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
&self,
bx: &mut Bx,
- llindex: V
+ llindex: V,
) -> Self {
// Statically compute the offset if we can, otherwise just use the element size,
// as this will yield the lowest alignment.
pub fn project_downcast<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
&self,
bx: &mut Bx,
- variant_index: VariantIdx
+ variant_index: VariantIdx,
) -> Self {
let mut downcast = *self;
downcast.layout = self.layout.for_variant(bx.cx(), variant_index);
pub fn codegen_place(
&mut self,
bx: &mut Bx,
- place_ref: &mir::PlaceRef<'_, 'tcx>
+ place_ref: &mir::PlaceRef<'_, 'tcx>,
) -> PlaceRef<'tcx, Bx::Value> {
debug!("codegen_place(place_ref={:?})", place_ref);
let cx = self.cx;
let tcx = self.cx.tcx();
- let result = match &place_ref {
- mir::PlaceRef {
- base: mir::PlaceBase::Local(index),
- projection: [],
- } => {
+ let result = match place_ref {
+ mir::PlaceRef { base: mir::PlaceBase::Local(index), projection: [] } => {
match self.locals[*index] {
LocalRef::Place(place) => {
return place;
}
}
mir::PlaceRef {
- base: mir::PlaceBase::Static(box mir::Static {
- ty,
- kind: mir::StaticKind::Promoted(promoted, substs),
- def_id,
- }),
- projection: [],
- } => {
- let param_env = ty::ParamEnv::reveal_all();
- let instance = Instance::new(*def_id, self.monomorphize(substs));
- let cid = mir::interpret::GlobalId {
- instance: instance,
- promoted: Some(*promoted),
- };
- let layout = cx.layout_of(self.monomorphize(&ty));
- match bx.tcx().const_eval(param_env.and(cid)) {
- Ok(val) => match val.val {
- ty::ConstKind::Value(mir::interpret::ConstValue::ByRef {
- alloc, offset
- }) => {
- bx.cx().from_const_alloc(layout, alloc, offset)
- }
- _ => bug!("promoteds should have an allocation: {:?}", val),
- },
- Err(_) => {
- // This is unreachable as long as runtime
- // and compile-time agree perfectly.
- // With floats that won't always be true,
- // so we generate a (safe) abort.
- bx.abort();
- // We still have to return a place but it doesn't matter,
- // this code is unreachable.
- let llval = bx.cx().const_undef(
- bx.cx().type_ptr_to(bx.cx().backend_type(layout))
- );
- PlaceRef::new_sized(llval, layout)
- }
- }
- }
- mir::PlaceRef {
- base: mir::PlaceBase::Static(box mir::Static {
- ty,
- kind: mir::StaticKind::Static,
- def_id,
- }),
+ base: mir::PlaceBase::Static(box mir::Static { ty, def_id }),
projection: [],
} => {
// NB: The layout of a static may be unsized as is the case when working
// with a static that is an extern_type.
let layout = cx.layout_of(self.monomorphize(&ty));
let static_ = bx.get_static(*def_id);
PlaceRef::new_thin_place(bx, static_, layout)
- },
- mir::PlaceRef {
- base,
- projection: [proj_base @ .., mir::ProjectionElem::Deref],
- } => {
+ }
+ mir::PlaceRef { base, projection: [proj_base @ .., mir::ProjectionElem::Deref] } => {
// Load the pointer from its location.
- self.codegen_consume(bx, &mir::PlaceRef {
- base,
- projection: proj_base,
- }).deref(bx.cx())
+ self.codegen_consume(bx, &mir::PlaceRef { base, projection: proj_base })
+ .deref(bx.cx())
}
- mir::PlaceRef {
- base,
- projection: [proj_base @ .., elem],
- } => {
+ mir::PlaceRef { base, projection: [proj_base @ .., elem] } => {
// FIXME turn this recursion into iteration
- let cg_base = self.codegen_place(bx, &mir::PlaceRef {
- base,
- projection: proj_base,
- });
+ let cg_base =
+ self.codegen_place(bx, &mir::PlaceRef { base, projection: proj_base });
match elem {
mir::ProjectionElem::Deref => bug!(),
cg_base.project_field(bx, field.index())
}
mir::ProjectionElem::Index(index) => {
- let index = &mir::Operand::Copy(
- mir::Place::from(*index)
- );
+ let index = &mir::Operand::Copy(mir::Place::from(*index));
let index = self.codegen_operand(bx, index);
let llindex = index.immediate();
cg_base.project_index(bx, llindex)
}
- mir::ProjectionElem::ConstantIndex { offset,
- from_end: false,
- min_length: _ } => {
+ mir::ProjectionElem::ConstantIndex {
+ offset,
+ from_end: false,
+ min_length: _,
+ } => {
let lloffset = bx.cx().const_usize(*offset as u64);
cg_base.project_index(bx, lloffset)
}
- mir::ProjectionElem::ConstantIndex { offset,
- from_end: true,
- min_length: _ } => {
+ mir::ProjectionElem::ConstantIndex {
+ offset,
+ from_end: true,
+ min_length: _,
+ } => {
let lloffset = bx.cx().const_usize(*offset as u64);
let lllen = cg_base.len(bx.cx());
let llindex = bx.sub(lllen, lloffset);
cg_base.project_index(bx, llindex)
}
mir::ProjectionElem::Subslice { from, to, from_end } => {
- let mut subslice = cg_base.project_index(bx,
- bx.cx().const_usize(*from as u64));
- let projected_ty = PlaceTy::from_ty(cg_base.layout.ty)
- .projection_ty(tcx, elem).ty;
+ let mut subslice =
+ cg_base.project_index(bx, bx.cx().const_usize(*from as u64));
+ let projected_ty =
+ PlaceTy::from_ty(cg_base.layout.ty).projection_ty(tcx, elem).ty;
subslice.layout = bx.cx().layout_of(self.monomorphize(&projected_ty));
if subslice.layout.is_unsized() {
assert!(from_end, "slice subslices should be `from_end`");
- subslice.llextra = Some(bx.sub(cg_base.llextra.unwrap(),
- bx.cx().const_usize((*from as u64) + (*to as u64))));
+ subslice.llextra = Some(bx.sub(
+ cg_base.llextra.unwrap(),
+ bx.cx().const_usize((*from as u64) + (*to as u64)),
+ ));
}
// Cast the place pointer type to the new
// array or slice type (`*[%_; new_len]`).
- subslice.llval = bx.pointercast(subslice.llval,
- bx.cx().type_ptr_to(bx.cx().backend_type(subslice.layout)));
+ subslice.llval = bx.pointercast(
+ subslice.llval,
+ bx.cx().type_ptr_to(bx.cx().backend_type(subslice.layout)),
+ );
subslice
}
- mir::ProjectionElem::Downcast(_, v) => {
- cg_base.project_downcast(bx, *v)
- }
+ mir::ProjectionElem::Downcast(_, v) => cg_base.project_downcast(bx, *v),
}
}
};
pub fn monomorphized_place_ty(&self, place_ref: &mir::PlaceRef<'_, 'tcx>) -> Ty<'tcx> {
let tcx = self.cx.tcx();
- let place_ty = mir::Place::ty_from(
- place_ref.base,
- place_ref.projection,
- *self.mir,
- tcx,
- );
+ let place_ty = mir::Place::ty_from(place_ref.base, place_ref.projection, *self.mir, tcx);
self.monomorphize(&place_ty.ty)
}
}
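
For readers skimming the reformatted niche-discriminant code in `codegen_set_discr` above: the encoding is plain wrapping arithmetic, and the sketch below illustrates it standalone. This is a minimal sketch, not rustc code; `encode_niche` and its parameter names are made up for illustration, mirroring the two reformatted lines that compute `niche_value`.

// Minimal sketch of the niche encoding performed by `codegen_set_discr` above:
//     let niche_value = variant_index.as_u32() - niche_variants.start().as_u32();
//     let niche_value = (niche_value as u128).wrapping_add(niche_start);
// All names here are illustrative, not rustc APIs.
fn encode_niche(variant_index: u32, niche_variants_start: u32, niche_start: u128) -> u128 {
    // A non-dataful variant is stored as its offset from the first niche
    // variant, wrapping-added to the start of the niche's value range.
    let relative = (variant_index - niche_variants_start) as u128;
    relative.wrapping_add(niche_start)
}

fn main() {
    // With the niche range starting at variant 1 and niche_start = 2,
    // variant 3 encodes to (3 - 1) + 2 = 4. Per the HACK noted in the diff,
    // an encoded value of 0 is emitted with `const_null` instead, since that
    // works on all types.
    assert_eq!(encode_niche(3, 1, 2), 4);
}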