/// The class of machine register an argument (or part of one) is passed in.
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub enum RegKind {
    /// General-purpose registers; also used for pointers (see `homogenous_aggregate`).
    Integer,
    /// Floating-point registers.
    Float,
    /// SIMD vector registers.
    Vector
}
+
/// A single register (or part of one) used to pass a value,
/// described by its register class and the size it occupies.
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub struct Reg {
    pub kind: RegKind,
    pub size: Size,
}
+
// Generates a shorthand constructor on `Reg`: `reg_ctor!(i32, Integer, 32)`
// expands to `pub fn i32() -> Reg` producing a 32-bit integer register.
macro_rules! reg_ctor {
    ($name:ident, $kind:ident, $bits:expr) => {
        pub fn $name() -> Reg {
            Reg {
                kind: RegKind::$kind,
                size: Size::from_bits($bits)
            }
        }
    }
}
+
impl Reg {
    // Shorthand constructors for the common integer register sizes.
    reg_ctor!(i8, Integer, 8);
    reg_ctor!(i16, Integer, 16);
    reg_ctor!(i32, Integer, 32);
    reg_ctor!(i64, Integer, 64);

    // Shorthand constructors for the two supported float sizes
    // (the only ones `llvm_type` can lower).
    reg_ctor!(f32, Float, 32);
    reg_ctor!(f64, Float, 64);
}
+
+impl Reg {
+ fn llvm_type(&self, ccx: &CrateContext) -> Type {
+ match self.kind {
+ RegKind::Integer => Type::ix(ccx, self.size.bits()),
+ RegKind::Float => {
+ match self.size.bits() {
+ 32 => Type::f32(ccx),
+ 64 => Type::f64(ccx),
+ _ => bug!("unsupported float: {:?}", self)
+ }
+ }
+ RegKind::Vector => {
+ Type::vector(&Type::i8(ccx), self.size.bytes())
+ }
+ }
+ }
+}
+
/// An argument passed entirely in registers of the
/// same kind (e.g. HFA / HVA on PPC64 and AArch64).
#[derive(Copy, Clone)]
pub struct Uniform {
    /// The register unit the argument is made of.
    pub unit: Reg,

    /// The total size of the argument, which can be:
    /// * equal to `unit.size` (one scalar/vector)
    /// * a multiple of `unit.size` (an array of scalar/vectors)
    /// * if `unit.kind` is `Integer`, the last element
    ///   can be shorter, i.e. `{ i64, i64, i32 }` for
    ///   64-bit integers with a total size of 20 bytes
    pub total: Size,
}
+
+impl From<Reg> for Uniform {
+ fn from(unit: Reg) -> Uniform {
+ Uniform {
+ unit,
+ total: unit.size
+ }
+ }
+}
+
+impl Uniform {
+ fn llvm_type(&self, ccx: &CrateContext) -> Type {
+ let llunit = self.unit.llvm_type(ccx);
+
+ if self.total <= self.unit.size {
+ return llunit;
+ }
+
+ let count = self.total.bytes() / self.unit.size.bytes();
+ let rem_bytes = self.total.bytes() % self.unit.size.bytes();
+
+ if rem_bytes == 0 {
+ return Type::array(&llunit, count);
+ }
+
+ // Only integers can be really split further.
+ assert_eq!(self.unit.kind, RegKind::Integer);
+
+ let args: Vec<_> = (0..count).map(|_| llunit)
+ .chain(iter::once(Type::ix(ccx, rem_bytes * 8)))
+ .collect();
+
+ Type::struct_(ccx, &args, false)
+ }
+}
+
/// Layout queries used by the C ABI classification code.
pub trait LayoutExt<'tcx> {
    /// Whether this layout is a composite (array/struct/union-like) type,
    /// as opposed to a scalar, C-like enum or SIMD vector.
    fn is_aggregate(&self) -> bool;
    /// If this layout is made up entirely of units of a single register
    /// kind, returns that unit; `None` otherwise.
    fn homogenous_aggregate<'a>(&self, ccx: &CrateContext<'a, 'tcx>) -> Option<Reg>;
}
+
+impl<'tcx> LayoutExt<'tcx> for TyLayout<'tcx> {
+ fn is_aggregate(&self) -> bool {
+ match *self.layout {
+ Layout::Scalar { .. } |
+ Layout::RawNullablePointer { .. } |
+ Layout::CEnum { .. } |
+ Layout::Vector { .. } => false,
+
+ Layout::Array { .. } |
+ Layout::FatPointer { .. } |
+ Layout::Univariant { .. } |
+ Layout::UntaggedUnion { .. } |
+ Layout::General { .. } |
+ Layout::StructWrappedNullablePointer { .. } => true
+ }
+ }
+
+ fn homogenous_aggregate<'a>(&self, ccx: &CrateContext<'a, 'tcx>) -> Option<Reg> {
+ match *self.layout {
+ // The primitives for this algorithm.
+ Layout::Scalar { value, .. } |
+ Layout::RawNullablePointer { value, .. } => {
+ let kind = match value {
+ layout::Int(_) |
+ layout::Pointer => RegKind::Integer,
+ layout::F32 |
+ layout::F64 => RegKind::Float
+ };
+ Some(Reg {
+ kind,
+ size: self.size(ccx)
+ })
+ }
+
+ Layout::CEnum { .. } => {
+ Some(Reg {
+ kind: RegKind::Integer,
+ size: self.size(ccx)
+ })
+ }
+
+ Layout::Vector { .. } => {
+ Some(Reg {
+ kind: RegKind::Integer,
+ size: self.size(ccx)
+ })
+ }
+
+ Layout::Array { count, .. } => {
+ if count > 0 {
+ self.field(ccx, 0).homogenous_aggregate(ccx)
+ } else {
+ None
+ }
+ }
+
+ Layout::Univariant { ref variant, .. } => {
+ let mut unaligned_offset = Size::from_bytes(0);
+ let mut result = None;
+
+ for i in 0..self.field_count() {
+ if unaligned_offset != variant.offsets[i] {
+ return None;
+ }
+
+ let field = self.field(ccx, i);
+ match (result, field.homogenous_aggregate(ccx)) {
+ // The field itself must be a homogenous aggregate.
+ (_, None) => return None,
+ // If this is the first field, record the unit.
+ (None, Some(unit)) => {
+ result = Some(unit);
+ }
+ // For all following fields, the unit must be the same.
+ (Some(prev_unit), Some(unit)) => {
+ if prev_unit != unit {
+ return None;
+ }
+ }
+ }
+
+ // Keep track of the offset (without padding).
+ let size = field.size(ccx);
+ match unaligned_offset.checked_add(size, ccx) {
+ Some(offset) => unaligned_offset = offset,
+ None => return None
+ }
+ }
+
+ // There needs to be no padding.
+ if unaligned_offset != self.size(ccx) {
+ None
+ } else {
+ result
+ }
+ }
+
+ Layout::UntaggedUnion { .. } => {
+ let mut max = Size::from_bytes(0);
+ let mut result = None;
+
+ for i in 0..self.field_count() {
+ let field = self.field(ccx, i);
+ match (result, field.homogenous_aggregate(ccx)) {
+ // The field itself must be a homogenous aggregate.
+ (_, None) => return None,
+ // If this is the first field, record the unit.
+ (None, Some(unit)) => {
+ result = Some(unit);
+ }
+ // For all following fields, the unit must be the same.
+ (Some(prev_unit), Some(unit)) => {
+ if prev_unit != unit {
+ return None;
+ }
+ }
+ }
+
+ // Keep track of the offset (without padding).
+ let size = field.size(ccx);
+ if size > max {
+ max = size;
+ }
+ }
+
+ // There needs to be no padding.
+ if max != self.size(ccx) {
+ None
+ } else {
+ result
+ }
+ }
+
+ // Rust-specific types, which we can ignore for C ABIs.
+ Layout::FatPointer { .. } |
+ Layout::General { .. } |
+ Layout::StructWrappedNullablePointer { .. } => None
+ }
+ }
+}
+
/// An LLVM type an argument is cast to when being passed.
pub enum CastTarget {
    /// Repetitions of one register unit (possibly with a shorter integer tail).
    Uniform(Uniform),
    /// Two registers, possibly of different kinds, packed as a struct.
    Pair(Reg, Reg)
}
+
+impl From<Reg> for CastTarget {
+ fn from(unit: Reg) -> CastTarget {
+ CastTarget::Uniform(Uniform::from(unit))
+ }
+}
+
impl From<Uniform> for CastTarget {
    // Trivial wrapping into the `Uniform` variant.
    fn from(uniform: Uniform) -> CastTarget {
        CastTarget::Uniform(uniform)
    }
}
+
+impl CastTarget {
+ fn llvm_type(&self, ccx: &CrateContext) -> Type {
+ match *self {
+ CastTarget::Uniform(u) => u.llvm_type(ccx),
+ CastTarget::Pair(a, b) => {
+ Type::struct_(ccx, &[
+ a.llvm_type(ccx),
+ b.llvm_type(ccx)
+ ], false)
+ }
+ }
+ }
+}