1 // Copyright 2017 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
11 use abi::{self, Abi, Align, FieldPlacement, Size};
12 use abi::{HasDataLayout, LayoutOf, TyLayout, TyLayoutMethods};
13 use spec::HasTargetSpec;
36 #[derive(Clone, Copy, PartialEq, Eq, Debug)]
// NOTE(review): this listing has gaps — the embedded original line numbers
// jump (36, 38, 40, ...). The `pub enum PassMode {` header, the `Ignore`
// variant itself, the `Cast(..)` variant, and the closing brace fall on
// missing lines. The variants below are what the rest of this file matches
// on (see `make_indirect`, `cast_to`, `is_indirect`, `is_ignore` further
// down): this enum describes how a single argument or return value is
// passed under some calling convention.
38 /// Ignore the argument (useful for empty struct).
// (the `Ignore,` variant declared after this doc comment is on a missing
// line; `PassMode::Ignore` is compared against in `is_ignore` below)
40 /// Pass the argument directly.
41 Direct(ArgAttributes),
42 /// Pass a pair's elements directly in two arguments.
43 Pair(ArgAttributes, ArgAttributes),
44 /// Pass the argument after casting it, to either
45 /// a single uniform or a pair of registers.
// (the `Cast(..)` variant declared after this doc comment is on a missing
// line; `PassMode::Cast(target.into())` is constructed in `cast_to`, so it
// presumably holds a `CastTarget` — confirm against the full file)
47 /// Pass the argument indirectly via a hidden pointer.
48 /// The second value, if any, is for the extra data (vtable or length)
49 /// which indicates that it refers to an unsized rvalue.
50 Indirect(ArgAttributes, Option<ArgAttributes>),
53 // Hack to disable non_upper_case_globals only for the bitflags! and not for the rest
// NOTE(review): the `mod attr_impl {` wrapper, the `bitflags! {` invocation,
// and several constants are on lines missing from this extraction (the
// embedded numbering jumps 53, 55, 57, 60, 63, 65..68, 70).
55 pub use self::attr_impl::ArgAttribute;
57 #[allow(non_upper_case_globals)]
60 // The subset of llvm::Attribute needed for arguments, packed into a bitfield.
63 pub struct ArgAttribute: u16 {
// Bits 0 and 5 (and anything above 6) are declared on missing lines.
// `ArgAttribute::ByVal` is used later in this file (make_indirect_byval),
// so one of the missing constants is ByVal — TODO confirm bit positions
// against the full file.
65 const NoAlias = 1 << 1;
66 const NoCapture = 1 << 2;
67 const NonNull = 1 << 3;
68 const ReadOnly = 1 << 4;
70 const StructRet = 1 << 6;
77 /// A compact representation of LLVM attributes (at least those relevant for this module)
78 /// that can be manipulated without interacting with LLVM's Attribute machinery.
79 #[derive(Copy, Clone, PartialEq, Eq, Debug)]
80 pub struct ArgAttributes {
// Bit-set of regular (non-pointee) attributes for this argument.
81 pub regular: ArgAttribute,
// Size of the pointee, for by-pointer arguments (set in `make_indirect`).
82 pub pointee_size: Size,
// Known alignment of the pointee, if any (deliberately left unset by
// `make_indirect` — see the FIXME there).
83 pub pointee_align: Option<Align>
// NOTE(review): the struct's closing brace, the `impl ArgAttributes {`
// header, and parts of the method bodies below are on lines missing from
// this extraction (embedded numbering jumps 83 -> 87, 90 -> 95, 95 -> 100).
// Constructs an empty attribute set: no flags, zero pointee size.
87 pub fn new() -> Self {
89 regular: ArgAttribute::default(),
90 pointee_size: Size::ZERO,
// Adds `attr` to the set; returns `&mut Self` so calls can be chained
// (see the `.set(..).set(..)` chain in `make_indirect` below). The body
// is on missing lines.
95 pub fn set(&mut self, attr: ArgAttribute) -> &mut Self {
// Membership test, delegating to the underlying bitflags value.
100 pub fn contains(&self, attr: ArgAttribute) -> bool {
101 self.regular.contains(attr)
105 #[derive(Copy, Clone, PartialEq, Eq, Debug)]
// NOTE(review): the item this derive attaches to is on missing lines
// (numbering jumps 105 -> 112) — from later uses (`RegKind::Integer`,
// `RegKind::Float`, `RegKind::Vector`) it is the `RegKind` enum.
112 #[derive(Copy, Clone, PartialEq, Eq, Debug)]
// Likewise the `Reg` struct declaration is missing here; its `kind` and
// `size` fields are visible in the macro body below and in the
// `Reg { kind, size: .. }` construction in `CastTarget::align`.
118 macro_rules! reg_ctor {
119 ($name:ident, $kind:ident, $bits:expr) => {
// Generates a shorthand constructor (e.g. `Reg::i32()`) for a register
// of the given kind and bit-width.
120 pub fn $name() -> Reg {
122 kind: RegKind::$kind,
123 size: Size::from_bits($bits)
// (the `impl Reg {` header enclosing these invocations is on a missing line)
130 reg_ctor!(i8, Integer, 8);
131 reg_ctor!(i16, Integer, 16);
132 reg_ctor!(i32, Integer, 32);
133 reg_ctor!(i64, Integer, 64);
135 reg_ctor!(f32, Float, 32);
136 reg_ctor!(f64, Float, 64);
// Returns the ABI alignment of a register of this kind/size, looked up
// from the target's data layout. Panics on sizes the target layout does
// not describe.
140 pub fn align<C: HasDataLayout>(&self, cx: &C) -> Align {
141 let dl = cx.data_layout();
// (the `match self.kind {` line is missing from this extraction)
143 RegKind::Integer => {
// Integers round up to the next power-of-two layout entry.
144 match self.size.bits() {
145 1 => dl.i1_align.abi,
146 2..=8 => dl.i8_align.abi,
147 9..=16 => dl.i16_align.abi,
148 17..=32 => dl.i32_align.abi,
149 33..=64 => dl.i64_align.abi,
150 65..=128 => dl.i128_align.abi,
151 _ => panic!("unsupported integer: {:?}", self)
// (the `RegKind::Float => {` arm header is on a missing line; the float
// width match follows)
155 match self.size.bits() {
156 32 => dl.f32_align.abi,
157 64 => dl.f64_align.abi,
158 _ => panic!("unsupported float: {:?}", self)
// Vectors take whatever alignment the data layout gives their size.
161 RegKind::Vector => dl.vector_align(self.size).abi,
166 /// An argument passed entirely in registers with the
167 /// same kind (e.g. HFA / HVA on PPC64 and AArch64).
168 #[derive(Clone, Copy, PartialEq, Eq, Debug)]
// NOTE(review): the `pub struct Uniform { .. }` declaration itself is on
// missing lines (numbering jumps 168 -> 172); the `unit` and `total`
// fields the docs below describe are referenced by `CastTarget::size` and
// `CastTarget::align` later in this file.
172 /// The total size of the argument, which can be:
173 /// * equal to `unit.size` (one scalar/vector)
174 /// * a multiple of `unit.size` (an array of scalar/vectors)
175 /// * if `unit.kind` is `Integer`, the last element
176 /// can be shorter, i.e. `{ i64, i64, i32 }` for
177 /// 64-bit integers with a total size of 20 bytes
// Conversion from a single register: a Uniform covering exactly one unit
// (body on missing lines).
181 impl From<Reg> for Uniform {
182 fn from(unit: Reg) -> Uniform {
// Alignment of a Uniform — presumably delegates to the unit register's
// alignment; the body is on missing lines, so confirm against the full file.
191 pub fn align<C: HasDataLayout>(&self, cx: &C) -> Align {
196 #[derive(Clone, Copy, PartialEq, Eq, Debug)]
// Describes the type an argument is cast to before being passed: up to 8
// "prefix" register slots followed by a uniform "rest" section.
197 pub struct CastTarget {
198 pub prefix: [Option<RegKind>; 8],
// Size of each occupied prefix slot (all prefix slots share one chunk size).
199 pub prefix_chunk: Size,
// NOTE(review): the `rest: Uniform` field and the closing brace are on
// missing lines (numbering jumps 199 -> 203); `self.rest` is used by
// `size`/`align` below.
// Single register -> cast target, via Uniform.
203 impl From<Reg> for CastTarget {
204 fn from(unit: Reg) -> CastTarget {
205 CastTarget::from(Uniform::from(unit))
// Uniform -> cast target: empty prefix, everything in the rest section.
209 impl From<Uniform> for CastTarget {
210 fn from(uniform: Uniform) -> CastTarget {
213 prefix_chunk: Size::ZERO,
// Builds a two-register target: `a` as the single occupied prefix slot,
// `b` as the rest section. (The `CastTarget {` initializer line and
// closing braces are on missing lines.)
220 pub fn pair(a: Reg, b: Reg) -> CastTarget {
222 prefix: [Some(a.kind), None, None, None, None, None, None, None],
223 prefix_chunk: a.size,
224 rest: Uniform::from(b)
// Total size: the occupied prefix slots, aligned up to the rest section's
// alignment, plus the rest section's total size.
228 pub fn size<C: HasDataLayout>(&self, cx: &C) -> Size {
229 (self.prefix_chunk * self.prefix.iter().filter(|x| x.is_some()).count() as u64)
230 .align_to(self.rest.align(cx)) + self.rest.total
// Alignment: max over the target's aggregate alignment, the rest section's
// alignment, and each occupied prefix register's alignment. (The
// `self.prefix.iter()` line is missing between 233 and 235.)
233 pub fn align<C: HasDataLayout>(&self, cx: &C) -> Align {
235 .filter_map(|x| x.map(|kind| Reg { kind, size: self.prefix_chunk }.align(cx)))
236 .fold(cx.data_layout().aggregate_align.abi.max(self.rest.align(cx)),
237 |acc, align| acc.max(align))
241 impl<'a, Ty> TyLayout<'a, Ty> {
// True for layouts passed as memory aggregates (ScalarPair / Aggregate),
// false for vectors; the `match self.abi` header and any Scalar /
// Uninhabited arms are on missing lines.
242 fn is_aggregate(&self) -> bool {
246 Abi::Vector { .. } => false,
247 Abi::ScalarPair(..) |
248 Abi::Aggregate { .. } => true
// If this layout is a homogeneous aggregate — every leaf field reducing to
// the same register kind, laid out with no padding — returns that unit
// register; otherwise None. Used for HFA/HVA-style ABI rules (see the
// Uniform docs above). NOTE(review): many lines of this function are
// missing from the extraction; comments below annotate only the visible
// structure.
252 fn homogeneous_aggregate<C>(&self, cx: &C) -> Option<Reg>
253 where Ty: TyLayoutMethods<'a, C> + Copy, C: LayoutOf<Ty = Ty, TyLayout = Self>
// Uninhabited types are never passed.
256 Abi::Uninhabited => None,
258 // The primitive for this algorithm.
259 Abi::Scalar(ref scalar) => {
260 let kind = match scalar.value {
// (the integer arm, presumably `abi::Int(..) => RegKind::Integer`, is on
// a missing line — confirm against the full file)
262 abi::Pointer => RegKind::Integer,
263 abi::Float(_) => RegKind::Float,
// Vectors are themselves a single homogeneous unit.
271 Abi::Vector { .. } => {
273 kind: RegKind::Vector,
// Aggregates: walk the fields and require a single common unit.
278 Abi::ScalarPair(..) |
279 Abi::Aggregate { .. } => {
280 let mut total = Size::ZERO;
281 let mut result = None;
283 let is_union = match self.fields {
// Arrays defer to their element type (the `count` guard between 284 and
// 286 is on missing lines).
284 FieldPlacement::Array { count, .. } => {
286 return self.field(cx, 0).homogeneous_aggregate(cx);
291 FieldPlacement::Union(_) => true,
292 FieldPlacement::Arbitrary { .. } => false
295 for i in 0..self.fields.count() {
// In a non-union, each field must start exactly where the previous one
// ended — i.e. no padding between fields.
296 if !is_union && total != self.fields.offset(i) {
300 let field = self.field(cx, i);
301 match (result, field.homogeneous_aggregate(cx)) {
302 // The field itself must be a homogeneous aggregate.
303 (_, None) => return None,
304 // If this is the first field, record the unit.
305 (None, Some(unit)) => {
308 // For all following fields, the unit must be the same.
309 (Some(prev_unit), Some(unit)) => {
310 if prev_unit != unit {
316 // Keep track of the offset (without padding).
317 let size = field.size;
// Union members overlap, so the running total is a max, not a sum (the
// non-union accumulation branch is on missing lines).
319 total = total.max(size);
325 // There needs to be no padding.
326 if total != self.size {
336 /// Information about how to pass an argument to,
337 /// or return a value from, a function, under some ABI.
// NOTE(review): the derive line, a `mode: PassMode` field (used throughout
// `impl ArgType` below as `self.mode`), and the closing brace are on lines
// missing from this extraction (numbering jumps 337 -> 339, 340 -> 342,
// ends at 343).
339 pub struct ArgType<'a, Ty> {
// Layout of the value being passed or returned.
340 pub layout: TyLayout<'a, Ty>,
342 /// Dummy argument, which is emitted before the real argument.
343 pub pad: Option<Reg>,
348 impl<'a, Ty> ArgType<'a, Ty> {
// Fresh ArgType: passed directly with empty attributes (the remaining
// initializer fields, e.g. `pad: None`, are on missing lines).
349 pub fn new(layout: TyLayout<'a, Ty>) -> Self {
353 mode: PassMode::Direct(ArgAttributes::new()),
// Switches this argument to indirect passing (by hidden pointer). The
// assert means it may only be called while still in the freshly
// constructed Direct mode — calling it twice, or after cast_to, is a bug.
357 pub fn make_indirect(&mut self) {
358 assert_eq!(self.mode, PassMode::Direct(ArgAttributes::new()));
360 // Start with fresh attributes for the pointer.
361 let mut attrs = ArgAttributes::new();
363 // For non-immediate arguments the callee gets its own copy of
364 // the value on the stack, so there are no aliases. It's also
365 // program-invisible so can't possibly capture
366 attrs.set(ArgAttribute::NoAlias)
367 .set(ArgAttribute::NoCapture)
368 .set(ArgAttribute::NonNull);
369 attrs.pointee_size = self.layout.size;
370 // FIXME(eddyb) We should be doing this, but at least on
371 // i686-pc-windows-msvc, it results in wrong stack offsets.
372 // attrs.pointee_align = Some(self.layout.align.abi);
// Unsized values carry extra data (vtable or length — see the PassMode
// docs), which gets its own attribute set; the `else { None }` arm is on
// missing lines.
374 let extra_attrs = if self.layout.is_unsized() {
375 Some(ArgAttributes::new())
380 self.mode = PassMode::Indirect(attrs, extra_attrs);
// Indirect, additionally tagged with LLVM's `byval` attribute.
383 pub fn make_indirect_byval(&mut self) {
384 self.make_indirect();
// (the `match self.mode {` line and the unreachable arm are missing here)
386 PassMode::Indirect(ref mut attrs, _) => {
387 attrs.set(ArgAttribute::ByVal);
// Widens a small scalar integer argument to at least `bits`, recording a
// sign- or zero-extension attribute as appropriate. The SExt/ZExt arms
// following `attrs.set(if signed {` are on missing lines.
393 pub fn extend_integer_width_to(&mut self, bits: u64) {
394 // Only integers have signedness
395 if let Abi::Scalar(ref scalar) = self.layout.abi {
396 if let abi::Int(i, signed) = scalar.value {
397 if i.size().bits() < bits {
398 if let PassMode::Direct(ref mut attrs) = self.mode {
399 attrs.set(if signed {
// Switches to Cast mode; like make_indirect, only valid from the fresh
// Direct mode (enforced by the assert).
410 pub fn cast_to<T: Into<CastTarget>>(&mut self, target: T) {
411 assert_eq!(self.mode, PassMode::Direct(ArgAttributes::new()));
412 self.mode = PassMode::Cast(target.into());
// Requests a dummy padding register to be emitted before this argument.
415 pub fn pad_with(&mut self, reg: Reg) {
416 self.pad = Some(reg);
// True for any Indirect mode, sized or unsized pointee.
419 pub fn is_indirect(&self) -> bool {
421 PassMode::Indirect(..) => true,
// Indirect with no extra-data attributes, i.e. a sized pointee.
426 pub fn is_sized_indirect(&self) -> bool {
428 PassMode::Indirect(_, None) => true,
// Indirect with extra-data attributes, i.e. an unsized pointee.
433 pub fn is_unsized_indirect(&self) -> bool {
435 PassMode::Indirect(_, Some(_)) => true,
// True when the argument is skipped entirely.
440 pub fn is_ignore(&self) -> bool {
441 self.mode == PassMode::Ignore
445 #[derive(Copy, Clone, PartialEq, Debug)]
// NOTE(review): the item this derive attaches to, and everything through
// original line 466, is missing from this extraction (numbering jumps
// 445 -> 467).
467 /// Metadata describing how the arguments to a native function
468 /// should be passed in order to respect the native ABI.
470 /// I will do my best to describe this structure, but these
471 /// comments are reverse-engineered and may be inaccurate. -NDM
473 pub struct FnType<'a, Ty> {
474 /// The LLVM types of each argument.
475 pub args: Vec<ArgType<'a, Ty>>,
477 /// LLVM return type.
478 pub ret: ArgType<'a, Ty>,
// (any further fields and the closing brace are on lines 479-484, missing
// from this extraction — numbering jumps 478 -> 485)
485 impl<'a, Ty> FnType<'a, Ty> {
// Adjusts this FnType for the C ABI of the target architecture by
// dispatching on `target_spec().arch` to per-arch `compute_abi_info`
// implementations; returns Err with a message for an unrecognized arch.
// NOTE(review): the per-arch module declarations, several match-arm
// delimiters, and the trailing `Ok(())` are on lines missing from this
// extraction.
486 pub fn adjust_for_cabi<C>(&mut self, cx: &C, abi: ::spec::abi::Abi) -> Result<(), String>
487 where Ty: TyLayoutMethods<'a, C> + Copy,
488 C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout + HasTargetSpec
490 match &cx.target_spec().arch[..] {
// x86: Fastcall gets its own flavor (the `"x86" =>` pattern line and the
// `else` default-flavor arm are on missing lines).
492 let flavor = if abi == ::spec::abi::Abi::Fastcall {
493 x86::Flavor::Fastcall
497 x86::compute_abi_info(cx, self, flavor);
// x86_64: explicit SysV64 wins; then explicit Win64 or a windows-like
// target; otherwise the SysV ABI.
499 "x86_64" => if abi == ::spec::abi::Abi::SysV64 {
500 x86_64::compute_abi_info(cx, self);
501 } else if abi == ::spec::abi::Abi::Win64 || cx.target_spec().options.is_like_windows {
502 x86_win64::compute_abi_info(self);
504 x86_64::compute_abi_info(cx, self);
506 "aarch64" => aarch64::compute_abi_info(cx, self),
507 "amdgpu" => amdgpu::compute_abi_info(cx, self),
508 "arm" => arm::compute_abi_info(cx, self),
509 "mips" => mips::compute_abi_info(cx, self),
510 "mips64" => mips64::compute_abi_info(cx, self),
511 "powerpc" => powerpc::compute_abi_info(cx, self),
512 "powerpc64" => powerpc64::compute_abi_info(cx, self),
513 "s390x" => s390x::compute_abi_info(cx, self),
514 "asmjs" => asmjs::compute_abi_info(cx, self),
// wasm32: emscripten-flavored targets reuse the asmjs ABI (the
// `"wasm32" =>` pattern line and the `} else {` line are missing here).
516 if cx.target_spec().llvm_target.contains("emscripten") {
517 asmjs::compute_abi_info(cx, self)
519 wasm32::compute_abi_info(self)
522 "msp430" => msp430::compute_abi_info(self),
523 "sparc" => sparc::compute_abi_info(cx, self),
524 "sparc64" => sparc64::compute_abi_info(cx, self),
525 "nvptx" => nvptx::compute_abi_info(self),
526 "nvptx64" => nvptx64::compute_abi_info(self),
527 "hexagon" => hexagon::compute_abi_info(self),
528 "riscv32" => riscv::compute_abi_info(self, 32),
529 "riscv64" => riscv::compute_abi_info(self, 64),
530 a => return Err(format!("unrecognized arch \"{}\" in target specification", a))
// After the per-arch pass: an indirect return gets LLVM's sret-style
// StructRet attribute on the hidden return pointer.
533 if let PassMode::Indirect(ref mut attrs, _) = self.ret.mode {
534 attrs.set(ArgAttribute::StructRet);