1 // The classification code for the x86_64 ABI is taken from the clay language
2 // https://github.com/jckarter/clay/blob/master/compiler/src/externals.cpp
4 use crate::abi::call::{ArgAbi, CastTarget, FnAbi, Reg, RegKind};
5 use crate::abi::{self, Abi, HasDataLayout, LayoutOf, Size, TyLayout, TyLayoutMethods};
/// Classification of "eightbyte" components.
// N.B., the order of the variants is from general to specific,
// such that `unify(a, b)` is the "smaller" of `a` and `b`.
// The `Ord` derive below is what makes `min` usable as the merge
// operation in `classify` (see the `old.min(c)` call there).
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Debug)]
// NOTE(review): this second derive belongs to a separate item whose
// declaration is elided from this chunk — presumably the `Memory`
// marker type used in `Err(Memory)` below; confirm against the full file.
#[derive(Clone, Copy, Debug)]
// Currently supported vector size (AVX-512), in bits.
const LARGEST_VECTOR_SIZE: usize = 512;
// One "eightbyte" is a 64-bit chunk, so the largest supported value
// spans 512 / 64 = 8 classification slots.
const MAX_EIGHTBYTES: usize = LARGEST_VECTOR_SIZE / 64;
/// Classifies `arg` into up to `MAX_EIGHTBYTES` per-eightbyte register
/// classes, or returns `Err(Memory)` when the value must be passed on
/// the stack instead of in registers.
// NOTE(review): several lines of this function (its parameter list,
// `where` keywords, and parts of its control flow) are elided in this
// chunk; all code below is kept verbatim.
fn classify_arg<'a, Ty, C>(
) -> Result<[Option<Class>; MAX_EIGHTBYTES], Memory>
    Ty: TyLayoutMethods<'a, C> + Copy,
    C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout,
    // Recursively classifies `layout` located at byte offset `off`,
    // merging the resulting class into every eightbyte slot of `cls`
    // that the value overlaps.
    fn classify<'a, Ty, C>(
        layout: TyLayout<'a, Ty>,
        cls: &mut [Option<Class>],
    ) -> Result<(), Memory>
        Ty: TyLayoutMethods<'a, C> + Copy,
        C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout,
        // A field that is not naturally aligned at its offset forces
        // the whole argument into memory.
        if !off.is_aligned(layout.align.abi) {
        let mut c = match layout.abi {
            // Uninhabited data contributes nothing to classification.
            Abi::Uninhabited => return Ok(()),
            Abi::Scalar(ref scalar) => match scalar.value {
                // Integers and pointers go in general-purpose registers.
                abi::Int(..) | abi::Pointer => Class::Int,
                // Floats go in SSE registers.
                abi::F32 | abi::F64 => Class::Sse,
            Abi::Vector { .. } => Class::Sse,
            Abi::ScalarPair(..) | Abi::Aggregate { .. } => match layout.variants {
                abi::Variants::Single { .. } => {
                    // Classify each field at its absolute offset
                    // within the outermost value.
                    for i in 0..layout.fields.count() {
                        let field_off = off + layout.fields.offset(i);
                        classify(cx, layout.field(cx, i), cls, field_off)?;
                // Multi-variant enums are always passed in memory.
                abi::Variants::Multiple { .. } => return Err(Memory),
        // Fill in `cls` for scalars (Int/Sse) and vectors (Sse).
        let first = (off.bytes() / 8) as usize;
        let last = ((off.bytes() + layout.size.bytes() - 1) / 8) as usize;
        for cls in &mut cls[first..=last] {
            // Merge with any class recorded by an earlier field; `min`
            // picks the more general class (see the ordering note on
            // the `Class` derive above).
            *cls = Some(cls.map_or(c, |old| old.min(c)));
        // Everything after the first Sse "eightbyte"
        // component is the upper half of a register.
    // Round the total size up to whole eightbytes.
    let n = ((arg.layout.size.bytes() + 7) / 8) as usize;
    if n > MAX_EIGHTBYTES {
    let mut cls = [None; MAX_EIGHTBYTES];
    classify(cx, arg.layout, &mut cls, Size::ZERO)?;
    // NOTE(review): per the surrounding checks, a value spanning many
    // eightbytes is only register-eligible when it is one vector: an
    // `Sse` slot followed exclusively by `SseUp` slots — the enclosing
    // size condition is elided here, confirm against the full file.
    if cls[0] != Some(Class::Sse) {
    if cls[1..n].iter().any(|&c| c != Some(Class::SseUp)) {
        // A stray `SseUp` with no leading `Sse` starts a fresh SSE
        // register, so demote it to `Sse`.
        if cls[i] == Some(Class::SseUp) {
            cls[i] = Some(Class::Sse);
        } else if cls[i] == Some(Class::Sse) {
            // Skip past the `SseUp` continuations of this register.
            while i != n && cls[i] == Some(Class::SseUp) {
/// Translates the class at `cls[*i]` (together with any trailing
/// `SseUp` continuations) into a single register of at most `size`
/// bytes, advancing `*i` past the eightbytes consumed.
/// Returns `None` when there is no further component (see the
/// `assert_eq!(…, None)` in `cast_target`).
// NOTE(review): the `match` header and some arms/braces of this
// function are elided in this chunk; code below is verbatim.
fn reg_component(cls: &[Option<Class>], i: &mut usize, size: Size) -> Option<Reg> {
        // An `Int` eightbyte: a full i64, or a narrower integer
        // register when fewer than 8 bytes remain.
        Some(Class::Int) => {
        Some(if size.bytes() < 8 { Reg { kind: RegKind::Integer, size } } else { Reg::i64() })
        // An `Sse` eightbyte plus its `SseUp` continuations form one
        // (possibly vector) SSE register.
        Some(Class::Sse) => {
            // Count the consecutive eightbytes belonging to this register.
            1 + cls[*i + 1..].iter().take_while(|&&c| c == Some(Class::SseUp)).count();
        Some(if vec_len == 1 {
            // Multi-eightbyte case: an (8 * vec_len)-byte vector register.
            Reg { kind: RegKind::Vector, size: Size::from_bytes(8) * (vec_len as u64) }
        // `SseUp` never appears on its own here — it is consumed by the
        // `take_while` above, so any other class is a bug.
        Some(c) => unreachable!("reg_component: unhandled class {:?}", c),
/// Builds the `CastTarget` (one register, or a low/high pair) used to
/// pass a value of `size` bytes with eightbyte classification `cls`.
// NOTE(review): the cursor initialization and the final return of
// `target` are elided in this chunk; code below is verbatim.
fn cast_target(cls: &[Option<Class>], size: Size) -> CastTarget {
    // The low component must exist for any classified value.
    let lo = reg_component(cls, &mut i, size).unwrap();
    // Byte offset where the high component (if any) starts.
    let offset = Size::from_bytes(8) * (i as u64);
    let mut target = CastTarget::from(lo);
    // Remaining bytes past the low component become the high half.
    if let Some(hi) = reg_component(cls, &mut i, size - offset) {
        target = CastTarget::pair(lo, hi);
    // By this point every eightbyte must have been consumed.
    assert_eq!(reg_component(cls, &mut i, Size::ZERO), None);
// Register budgets for passing arguments in the x86-64 (System V)
// calling convention handled by this file.
const MAX_INT_REGS: usize = 6; // RDI, RSI, RDX, RCX, R8, R9
const MAX_SSE_REGS: usize = 8; // XMM0-7
/// Computes the x86-64 System V register/stack lowering for the return
/// value and every argument of `fn_abi`, mutating each `ArgAbi` in place.
// NOTE(review): large spans of the closure body (register-budget
// bookkeeping, several match arms, closing braces) are elided in this
// chunk; code below is verbatim.
pub fn compute_abi_info<'a, Ty, C>(cx: &C, fn_abi: &mut FnAbi<'a, Ty>)
    Ty: TyLayoutMethods<'a, C> + Copy,
    C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout,
    // Registers still available for argument passing.
    let mut int_regs = MAX_INT_REGS;
    let mut sse_regs = MAX_SSE_REGS;
    let mut x86_64_arg_or_ret = |arg: &mut ArgAbi<'a, Ty>, is_arg: bool| {
        // Per-eightbyte classes on success, `Err(Memory)` for stack-passed.
        let mut cls_or_mem = classify_arg(cx, arg);
        if let Ok(cls) = cls_or_mem {
            // Count how many registers of each kind this value needs.
            let mut needed_int = 0;
            let mut needed_sse = 0;
                Some(Class::Int) => needed_int += 1,
                Some(Class::Sse) => needed_sse += 1,
            // Try to reserve the registers; `checked_sub` yields `None`
            // when either register file would be exhausted.
            match (int_regs.checked_sub(needed_int), sse_regs.checked_sub(needed_sse)) {
                (Some(left_int), Some(left_sse)) => {
                // Not enough registers for this argument, so it will be
                // passed on the stack, but we only mark aggregates
                // explicitly as indirect `byval` arguments, as LLVM will
                // automatically put immediates on the stack itself.
                if arg.layout.is_aggregate() {
                    cls_or_mem = Err(Memory);
        arg.make_indirect_byval();
        // `sret` parameter thus one less integer register available
        // NOTE(eddyb) return is handled first, so no registers
        // should've been used yet.
        assert_eq!(int_regs, MAX_INT_REGS);
        // split into sized chunks passed individually
        if arg.layout.is_aggregate() {
            let size = arg.layout.size;
            arg.cast_to(cast_target(cls, size))
        // Small scalars get sign/zero-extended to at least 32 bits.
        arg.extend_integer_width_to(32);
    // The return value is lowered first, so that an indirect (`sret`)
    // return can claim its pointer register before any argument does
    // (see the NOTE(eddyb) assertion above).
    if !fn_abi.ret.is_ignore() {
        x86_64_arg_or_ret(&mut fn_abi.ret, false);
    for arg in &mut fn_abi.args {
        x86_64_arg_or_ret(arg, true);