1 use crate::builder::Builder;
2 use crate::type_::Type;
3 use crate::type_of::LayoutLlvmExt;
4 use crate::value::Value;
5 use rustc_codegen_ssa::mir::operand::OperandRef;
6 use rustc_codegen_ssa::{
8 traits::{BaseTypeMethods, BuilderMethods, ConstMethods, DerivedTypeMethods},
10 use rustc_middle::ty::layout::HasTyCtxt;
11 use rustc_middle::ty::Ty;
12 use rustc_target::abi::{Align, Endian, HasDataLayout, LayoutOf, Size};
// Rounds `addr` up to the next multiple of `align` and returns it as `ptr_ty`.
// Classic integer trick: (p + align - 1) & -align.
// NOTE(review): this excerpt elides interior lines (the `addr`/`align`/`ptr_ty`
// parameters and the closing brace are not visible here).
14 fn round_pointer_up_to_alignment(
15 bx: &mut Builder<'a, 'll, 'tcx>,
// Convert the pointer to a pointer-sized integer so we can do arithmetic on it.
20 let mut ptr_as_int = bx.ptrtoint(addr, bx.cx().type_isize());
// Add (align - 1), then mask with -align to clear the low bits — this rounds
// up to the next multiple of `align`.
21 ptr_as_int = bx.add(ptr_as_int, bx.cx().const_i32(align.bytes() as i32 - 1));
22 ptr_as_int = bx.and(ptr_as_int, bx.cx().const_i32(-(align.bytes() as i32)));
// Cast the aligned integer back to the requested pointer type.
23 bx.inttoptr(ptr_as_int, ptr_ty)
// Emits a va_arg read for the simple "pointer bump" va_list scheme: load the
// current argument pointer from the list, realign it if allowed, advance the
// stored pointer by a whole number of `slot_size` slots, and return the
// (possibly endian-adjusted) address of the value plus its alignment.
// NOTE(review): this excerpt elides interior lines (several parameters, the
// `else` arms and closing braces are not visible here).
26 fn emit_direct_ptr_va_arg(
27 bx: &mut Builder<'a, 'll, 'tcx>,
28 list: OperandRef<'tcx, &'ll Value>,
33 allow_higher_align: bool,
34 ) -> (&'ll Value, Align) {
// The va_list is modelled as an i8* slot holding the current argument
// pointer; bitcast the incoming operand if its LLVM type differs.
35 let va_list_ty = bx.type_i8p();
36 let va_list_ptr_ty = bx.type_ptr_to(va_list_ty);
37 let va_list_addr = if list.layout.llvm_type(bx.cx) != va_list_ptr_ty {
38 bx.bitcast(list.immediate(), va_list_ptr_ty)
// Load the current argument pointer out of the va_list.
43 let ptr = bx.load(va_list_ty, va_list_addr, bx.tcx().data_layout.pointer_align.abi);
// Over-aligned arguments get rounded up to their natural alignment, but only
// when the ABI permits it (`allow_higher_align`).
45 let (addr, addr_align) = if allow_higher_align && align > slot_size {
46 (round_pointer_up_to_alignment(bx, ptr, align, bx.cx().type_i8p()), align)
// Advance the stored pointer past this argument, rounded up to a whole number
// of slots, and write it back so the next va_arg sees the new position.
51 let aligned_size = size.align_to(slot_size).bytes() as i32;
52 let full_direct_size = bx.cx().const_i32(aligned_size);
53 let next = bx.inbounds_gep(bx.type_i8(), addr, &[full_direct_size]);
54 bx.store(next, va_list_addr, bx.tcx().data_layout.pointer_align.abi);
// On big-endian targets a value smaller than its slot sits at the high end of
// the slot, so offset into the slot before forming the result pointer.
56 if size.bytes() < slot_size.bytes() && bx.tcx().sess.target.endian == Endian::Big {
57 let adjusted_size = bx.cx().const_i32((slot_size.bytes() - size.bytes()) as i32);
58 let adjusted = bx.inbounds_gep(bx.type_i8(), addr, &[adjusted_size]);
59 (bx.bitcast(adjusted, bx.cx().type_ptr_to(llty)), addr_align)
// Little-endian (or exact-size) case: the value starts at `addr` itself.
61 (bx.bitcast(addr, bx.cx().type_ptr_to(llty)), addr_align)
// (emit_ptr_va_arg — the `fn` line itself, and some parameters, are elided
// from this excerpt.)
// Loads one va_arg value of `target_ty` via `emit_direct_ptr_va_arg`. When
// `indirect` is set the slot holds a *pointer* to the value (presumably for
// ABIs that pass large values by reference — TODO confirm against the full
// signature), so a second load dereferences it.
66 bx: &mut Builder<'a, 'll, 'tcx>,
67 list: OperandRef<'tcx, &'ll Value>,
71 allow_higher_align: bool,
73 let layout = bx.cx.layout_of(target_ty);
// Indirect arguments occupy a pointer-sized, pointer-aligned slot; direct
// arguments use the target type's own size and alignment.
74 let (llty, size, align) = if indirect {
76 bx.cx.layout_of(bx.cx.tcx.mk_imm_ptr(target_ty)).llvm_type(bx.cx),
77 bx.cx.data_layout().pointer_size,
78 bx.cx.data_layout().pointer_align,
81 (layout.llvm_type(bx.cx), layout.size, layout.align)
83 let (addr, addr_align) =
84 emit_direct_ptr_va_arg(bx, list, llty, size, align.abi, slot_size, allow_higher_align);
// Indirect: first load yields the pointer stored in the slot, the second load
// reads the actual value it points to.
86 let tmp_ret = bx.load(llty, addr, addr_align);
87 bx.load(bx.cx.layout_of(target_ty).llvm_type(bx.cx), tmp_ret, align.abi)
// Direct: the value sits in the slot itself.
89 bx.load(llty, addr, addr_align)
// (emit_aapcs_va_arg — the `fn` line and some surrounding lines are elided
// from this excerpt.)
// AAPCS64 va_arg: the va_list records register-save areas plus signed
// offsets into them. Control flow forms a diamond: if the saved-register
// area is exhausted we fall back to the stack path, otherwise we bump the
// offset and load the value out of the register save area.
94 bx: &mut Builder<'a, 'll, 'tcx>,
95 list: OperandRef<'tcx, &'ll Value>,
98 // Implementation of the AAPCS64 calling convention for va_args see
99 // https://github.com/ARM-software/abi-aa/blob/master/aapcs64/aapcs64.rst
100 let va_list_addr = list.immediate();
101 let va_list_ty = list.deref(bx.cx).layout.llvm_type(bx);
102 let layout = bx.cx.layout_of(target_ty);
// Basic blocks for the register-vs-stack decision diamond.
104 let mut maybe_reg = bx.build_sibling_block("va_arg.maybe_reg");
105 let mut in_reg = bx.build_sibling_block("va_arg.in_reg");
106 let mut on_stack = bx.build_sibling_block("va_arg.on_stack");
107 let mut end = bx.build_sibling_block("va_arg.end");
108 let zero = bx.const_i32(0);
// The offset fields in the va_list are 4-byte-aligned i32s.
109 let offset_align = Align::from_bytes(4).unwrap();
// Integer/pointer values use the general-purpose register save area (8-byte
// registers); everything else uses the vector area (16-byte registers). The
// struct_gep indices select the offset fields of the AAPCS64 va_list struct
// (presumably __gr_offs at field 3 and __vr_offs at field 4 — confirm
// against the spec linked above).
111 let gr_type = target_ty.is_any_ptr() || target_ty.is_integral();
112 let (reg_off, reg_top_index, slot_size) = if gr_type {
113 let gr_offs = bx.struct_gep(va_list_ty, va_list_addr, 3);
// Round the value's size up to whole 8-byte GP registers.
114 let nreg = (layout.size.bytes() + 7) / 8;
115 (gr_offs, 1, nreg * 8)
117 let vr_off = bx.struct_gep(va_list_ty, va_list_addr, 4);
// Round the value's size up to whole 16-byte vector registers.
118 let nreg = (layout.size.bytes() + 15) / 16;
119 (vr_off, 2, nreg * 16)
122 // if the offset >= 0 then the value will be on the stack
123 let mut reg_off_v = bx.load(bx.type_i32(), reg_off, offset_align);
124 let use_stack = bx.icmp(IntPredicate::IntSGE, reg_off_v, zero);
125 bx.cond_br(use_stack, &on_stack.llbb(), &maybe_reg.llbb());
127 // The value at this point might be in a register, but there is a chance that
128 // it could be on the stack so we have to update the offset and then check
// GP values aligned above 8 bytes consume 16-byte-aligned register pairs:
// round the (negative) offset up to a multiple of 16 first.
131 if gr_type && layout.align.abi.bytes() > 8 {
132 reg_off_v = maybe_reg.add(reg_off_v, bx.const_i32(15));
133 reg_off_v = maybe_reg.and(reg_off_v, bx.const_i32(-16));
// Advance the offset past this value and commit it back to the va_list.
135 let new_reg_off_v = maybe_reg.add(reg_off_v, bx.const_i32(slot_size as i32));
137 maybe_reg.store(new_reg_off_v, reg_off, offset_align);
139 // Check to see if we have overflowed the registers as a result of this.
140 // If we have then we need to use the stack for this value
141 let use_stack = maybe_reg.icmp(IntPredicate::IntSGT, new_reg_off_v, zero);
142 maybe_reg.cond_br(use_stack, &on_stack.llbb(), &in_reg.llbb());
// in_reg: load the save-area top pointer, then index by the (negative, per
// the offset-sign convention noted above) `reg_off_v` to reach the value.
144 let top_type = bx.type_i8p();
145 let top = in_reg.struct_gep(va_list_ty, va_list_addr, reg_top_index);
146 let top = in_reg.load(top_type, top, bx.tcx().data_layout.pointer_align.abi);
148 // reg_value = *(@top + reg_off_v);
149 let mut reg_addr = in_reg.gep(bx.type_i8(), top, &[reg_off_v]);
150 if bx.tcx().sess.target.endian == Endian::Big && layout.size.bytes() != slot_size {
151 // On big-endian systems the value is right-aligned in its slot.
152 let offset = bx.const_i32((slot_size - layout.size.bytes()) as i32);
153 reg_addr = in_reg.gep(bx.type_i8(), reg_addr, &[offset]);
// Cast to the value's LLVM type, load it, and jump to the join block.
155 let reg_type = layout.llvm_type(bx);
156 let reg_addr = in_reg.bitcast(reg_addr, bx.cx.type_ptr_to(reg_type));
157 let reg_value = in_reg.load(reg_type, reg_addr, layout.align.abi);
158 in_reg.br(&end.llbb());
// on_stack: fall back to the plain 8-byte-slot pointer-bump scheme.
162 emit_ptr_va_arg(&mut on_stack, list, target_ty, false, Align::from_bytes(8).unwrap(), true);
163 on_stack.br(&end.llbb());
// end: phi the register-path and stack-path results together.
// NOTE(review): the phi call head and closing lines are elided from this
// excerpt.
166 layout.immediate_llvm_type(bx),
167 &[reg_value, stack_value],
168 &[&in_reg.llbb(), &on_stack.llbb()],
// Entry point: picks a per-target lowering for `va_arg` of `target_ty`.
// NOTE(review): this excerpt elides interior lines (the `target_ty`
// parameter, the `match arch` header, and the closing braces are not
// visible here).
175 pub(super) fn emit_va_arg(
176 bx: &mut Builder<'a, 'll, 'tcx>,
177 addr: OperandRef<'tcx, &'ll Value>,
180 // Determine the va_arg implementation to use. The LLVM va_arg instruction
181 // is lacking in some instances, so we should only use it as a fallback.
182 let target = &bx.cx.tcx.sess.target;
183 let arch = &bx.cx.tcx.sess.target.arch;
// Windows x86: 4-byte slots, no realignment of over-aligned values.
186 "x86" if target.is_like_windows => {
187 emit_ptr_va_arg(bx, addr, target_ty, false, Align::from_bytes(4).unwrap(), false)
// Other x86 targets: 4-byte slots, over-aligned values may be realigned.
190 "x86" => emit_ptr_va_arg(bx, addr, target_ty, false, Align::from_bytes(4).unwrap(), true),
// Windows AArch64: 8-byte slots, no realignment.
192 "aarch64" if target.is_like_windows => {
193 emit_ptr_va_arg(bx, addr, target_ty, false, Align::from_bytes(8).unwrap(), false)
195 // macOS / iOS AArch64
196 "aarch64" if target.is_like_osx => {
197 emit_ptr_va_arg(bx, addr, target_ty, false, Align::from_bytes(8).unwrap(), true)
// All other AArch64 targets use the full AAPCS64 register-save scheme.
199 "aarch64" => emit_aapcs_va_arg(bx, addr, target_ty),
// Windows x86_64: values bigger than 8 bytes or of non-power-of-two size
// are stored indirectly (the va_list slot holds a pointer to the value).
201 "x86_64" if target.is_like_windows => {
202 let target_ty_size = bx.cx.size_of(target_ty).bytes();
203 let indirect: bool = target_ty_size > 8 || !target_ty_size.is_power_of_two();
204 emit_ptr_va_arg(bx, addr, target_ty, indirect, Align::from_bytes(8).unwrap(), false)
206 // For all other architecture/OS combinations fall back to using
207 // the LLVM va_arg instruction.
208 // https://llvm.org/docs/LangRef.html#va-arg-instruction
209 _ => bx.va_arg(addr.immediate(), bx.cx.layout_of(target_ty).llvm_type(bx.cx)),