1 use crate::abi::call::{ArgAbi, ArgAttribute, CastTarget, FnAbi, PassMode, Reg, RegKind, Uniform};
2 use crate::abi::{self, HasDataLayout, LayoutOf, Size, TyLayout, TyLayoutMethods};
/// Applies the integer-extension rule for scalar args/returns on MIPS n64.
///
/// The n64 ABI keeps 32-bit values *sign*-extended in 64-bit registers
/// regardless of signedness, so an unsigned 32-bit integer must be marked
/// `SExt` (not the usual `ZExt`); all other scalars fall through to the
/// generic `extend_integer_width_to`.
///
/// NOTE(review): intermediate lines (early `return`, closing braces) appear
/// elided in this excerpt — confirm against the full file.
4 fn extend_integer_width_mips<Ty>(arg: &mut ArgAbi<'_, Ty>, bits: u64) {
5 // Always sign extend u32 values on 64-bit mips
6 if let abi::Abi::Scalar(ref scalar) = arg.layout.abi {
7 if let abi::Int(i, signed) = scalar.value {
8 if !signed && i.size().bits() == 32 {
9 if let PassMode::Direct(ref mut attrs) = arg.mode {
// Force sign extension even though the Rust type is unsigned.
10 attrs.set(ArgAttribute::SExt);
// Default path: extend the integer to the full register width (`bits`).
17 arg.extend_integer_width_to(bits);
/// Returns the register class for field `i` of `ret`'s layout when that
/// field is a scalar float: `Reg::f32()` for `f32`, `Reg::f64()` for `f64`.
/// Any other field kind yields `None` (the catch-all arms are elided in
/// this excerpt).
20 fn float_reg<'a, Ty, C>(cx: &C, ret: &ArgAbi<'a, Ty>, i: usize) -> Option<Reg>
22 Ty: TyLayoutMethods<'a, C> + Copy,
23 C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout,
25 match ret.layout.field(cx, i).abi {
26 abi::Abi::Scalar(ref scalar) => match scalar.value {
27 abi::F32 => Some(Reg::f32()),
28 abi::F64 => Some(Reg::f64()),
/// Classifies the return value under the MIPS n64 calling convention.
///
/// Non-aggregate scalars are extended to 64 bits (with the unsigned-32-bit
/// sign-extension quirk handled by `extend_integer_width_mips`). Structures
/// containing exactly one or two scalar floats are returned in float
/// registers; every other aggregate is cast to a series of 64-bit integer
/// chunks.
///
/// NOTE(review): a size guard and early returns appear elided in this
/// excerpt — confirm against the full file.
35 fn classify_ret<'a, Ty, C>(cx: &C, ret: &mut ArgAbi<'a, Ty>)
37 Ty: TyLayoutMethods<'a, C> + Copy,
38 C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout,
40 if !ret.layout.is_aggregate() {
// Scalar return: (sign-)extend to a full 64-bit register and stop.
41 extend_integer_width_mips(ret, 64);
45 let size = ret.layout.size;
46 let bits = size.bits();
48 // Unlike other architectures which return aggregates in registers, MIPS n64 limits the
49 // use of float registers to structures (not unions) containing exactly one or two
52 if let abi::FieldPlacement::Arbitrary { .. } = ret.layout.fields {
53 if ret.layout.fields.count() == 1 {
// Exactly one field: if it is a float, return it in one float register.
54 if let Some(reg) = float_reg(cx, ret, 0) {
58 } else if ret.layout.fields.count() == 2 {
// Exactly two fields: if both are floats, return them as a pair of
// float registers.
59 if let Some(reg0) = float_reg(cx, ret, 0) {
60 if let Some(reg1) = float_reg(cx, ret, 1) {
61 ret.cast_to(CastTarget::pair(reg0, reg1));
68 // Cast to a uniform int structure
69 ret.cast_to(Uniform { unit: Reg::i64(), total: size });
/// Classifies a single argument under the MIPS n64 calling convention.
///
/// Scalars are extended to 64 bits. Aggregates are split into 64-bit
/// chunks: arrays are passed indirectly, unions are all-integer chunks,
/// and for structures each suitably aligned `f64` field that is directly a
/// struct field (not nested in another aggregate) gets a float chunk while
/// the remaining bytes get integer chunks. At most the first 8 chunks are
/// tracked individually (`prefix`); the remainder is a uniform `i64` tail.
///
/// NOTE(review): loop-advance and brace lines appear elided in this
/// excerpt — confirm against the full file.
75 fn classify_arg<'a, Ty, C>(cx: &C, arg: &mut ArgAbi<'a, Ty>)
77 Ty: TyLayoutMethods<'a, C> + Copy,
78 C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout,
80 if !arg.layout.is_aggregate() {
// Scalar argument: (sign-)extend to a full 64-bit register and stop.
81 extend_integer_width_mips(arg, 64);
85 let dl = cx.data_layout();
86 let size = arg.layout.size;
// Register class of each leading 64-bit chunk (up to 8 = 64 bytes);
// `None` entries are unused slots.
87 let mut prefix = [None; 8];
88 let mut prefix_index = 0;
90 match arg.layout.fields {
91 abi::FieldPlacement::Array { .. } => {
92 // Arrays are passed indirectly
96 abi::FieldPlacement::Union(_) => {
97 // Unions are always treated as a series of 64-bit integer chunks
99 abi::FieldPlacement::Arbitrary { .. } => {
100 // Structures are split up into a series of 64-bit integer chunks, but any aligned
101 // doubles not part of another aggregate are passed as floats.
102 let mut last_offset = Size::ZERO;
104 for i in 0..arg.layout.fields.count() {
105 let field = arg.layout.field(cx, i);
106 let offset = arg.layout.fields.offset(i);
108 // We only care about aligned doubles
109 if let abi::Abi::Scalar(ref scalar) = field.abi {
110 if let abi::F64 = scalar.value {
111 if offset.is_aligned(dl.f64_align.abi) {
112 // Insert enough integers to cover [last_offset, offset)
113 assert!(last_offset.is_aligned(dl.f64_align.abi));
// Number of 64-bit integer filler chunks, clamped to the
// prefix slots still available.
114 for _ in 0..((offset - last_offset).bits() / 64)
115 .min((prefix.len() - prefix_index) as u64)
117 prefix[prefix_index] = Some(RegKind::Integer);
// All 8 prefix slots consumed — nothing more to record.
121 if prefix_index == prefix.len() {
// Record this aligned double as a float chunk.
125 prefix[prefix_index] = Some(RegKind::Float);
127 last_offset = offset + Reg::f64().size;
135 // Extract first 8 chunks as the prefix
// Bytes not covered by the prefix become the uniform integer tail.
136 let rest_size = size - Size::from_bytes(8) * prefix_index as u64;
137 arg.cast_to(CastTarget {
139 prefix_chunk: Size::from_bytes(8),
140 rest: Uniform { unit: Reg::i64(), total: rest_size },
/// Entry point: computes the MIPS n64 ABI classification for `fn_abi`,
/// classifying the return value (unless it is ignored) and then each
/// argument. (An `is_ignore` guard for arguments appears elided in this
/// excerpt — confirm against the full file.)
144 pub fn compute_abi_info<'a, Ty, C>(cx: &C, fn_abi: &mut FnAbi<'a, Ty>)
146 Ty: TyLayoutMethods<'a, C> + Copy,
147 C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout,
149 if !fn_abi.ret.is_ignore() {
150 classify_ret(cx, &mut fn_abi.ret);
153 for arg in &mut fn_abi.args {
157 classify_arg(cx, arg);