1 use crate::builder::Builder;
2 use crate::type_::Type;
3 use crate::type_of::LayoutLlvmExt;
4 use crate::value::Value;
5 use rustc_codegen_ssa::mir::operand::OperandRef;
6 use rustc_codegen_ssa::traits::{
7 BaseTypeMethods, BuilderMethods, ConstMethods, DerivedTypeMethods,
9 use rustc_middle::ty::layout::HasTyCtxt;
10 use rustc_middle::ty::Ty;
11 use rustc_target::abi::{Align, HasDataLayout, LayoutOf, Size};
14 fn round_pointer_up_to_alignment(
15 bx: &mut Builder<'a, 'll, 'tcx>,
20 let mut ptr_as_int = bx.ptrtoint(addr, bx.cx().type_isize());
21 ptr_as_int = bx.add(ptr_as_int, bx.cx().const_i32(align.bytes() as i32 - 1));
22 ptr_as_int = bx.and(ptr_as_int, bx.cx().const_i32(-(align.bytes() as i32)));
23 bx.inttoptr(ptr_as_int, ptr_ty)
// Emits the "pointer bump" flavor of va_arg used on targets whose va_list is
// just a pointer into the argument save area: load the current argument
// pointer, optionally round it up, advance the saved pointer past this slot,
// and return the (possibly endian-adjusted) argument address plus the
// alignment it is known to have.
//
// NOTE(review): this extraction is missing several original lines (the
// `else` branches and closing braces between the visible ones); the comments
// below describe only what the visible lines establish.
26 fn emit_direct_ptr_va_arg(
27 bx: &mut Builder<'a, 'll, 'tcx>,
28 list: OperandRef<'tcx, &'ll Value>,
// (parameters `llty`, `size`, `align`, `slot_size` are used below but their
// declaration lines are missing from this extraction — TODO confirm order
// against the call in emit_ptr_va_arg: (bx, list, llty, size, align.abi,
// slot_size, allow_higher_align).)
33 allow_higher_align: bool,
34 ) -> (&'ll Value, Align) {
// The va_list is accessed as an `i8**`: a pointer to the current argument
// pointer. Bitcast the operand if its LLVM type differs.
35 let va_list_ptr_ty = bx.cx().type_ptr_to(bx.cx.type_i8p());
36 let va_list_addr = if list.layout.llvm_type(bx.cx) != va_list_ptr_ty {
37 bx.bitcast(list.immediate(), va_list_ptr_ty)
// Load the current argument pointer out of the va_list.
42 let ptr = bx.load(va_list_addr, bx.tcx().data_layout.pointer_align.abi);
// Over-aligned arguments: when permitted, round the pointer up to the
// argument's alignment; the result is then known to have that alignment.
44 let (addr, addr_align) = if allow_higher_align && align > slot_size {
45 (round_pointer_up_to_alignment(bx, ptr, align, bx.cx().type_i8p()), align)
// Advance the saved pointer past this argument, rounded up to a whole
// number of slots, and store it back into the va_list.
50 let aligned_size = size.align_to(slot_size).bytes() as i32;
51 let full_direct_size = bx.cx().const_i32(aligned_size);
52 let next = bx.inbounds_gep(addr, &[full_direct_size]);
53 bx.store(next, va_list_addr, bx.tcx().data_layout.pointer_align.abi);
// Big-endian targets store a small argument in the high-order bytes of its
// slot, so point at the value itself rather than the slot start.
55 if size.bytes() < slot_size.bytes() && &*bx.tcx().sess.target.target.target_endian == "big" {
56 let adjusted_size = bx.cx().const_i32((slot_size.bytes() - size.bytes()) as i32);
57 let adjusted = bx.inbounds_gep(addr, &[adjusted_size]);
58 (bx.bitcast(adjusted, bx.cx().type_ptr_to(llty)), addr_align)
// Little-endian (or full-slot) case: the argument starts at the slot start.
60 (bx.bitcast(addr, bx.cx().type_ptr_to(llty)), addr_align)
// (NOTE(review): the `fn emit_ptr_va_arg(` header line is missing from this
// extraction; the name is taken from the call sites in emit_va_arg. Wrapper
// over emit_direct_ptr_va_arg that picks the LLVM type/size/alignment for
// `target_ty` — or for a pointer to it when `indirect` — and loads the
// resulting argument value.)
65 bx: &mut Builder<'a, 'll, 'tcx>,
66 list: OperandRef<'tcx, &'ll Value>,
70 allow_higher_align: bool,
72 let layout = bx.cx.layout_of(target_ty);
// Indirect arguments: the slot holds a pointer to the value, so use
// pointer size/alignment for the slot itself.
73 let (llty, size, align) = if indirect {
75 bx.cx.layout_of(bx.cx.tcx.mk_imm_ptr(target_ty)).llvm_type(bx.cx),
76 bx.cx.data_layout().pointer_size,
77 bx.cx.data_layout().pointer_align,
// Direct arguments: the slot holds the value itself.
80 (layout.llvm_type(bx.cx), layout.size, layout.align)
// Compute the address of the current argument and advance the va_list.
82 let (addr, addr_align) =
83 emit_direct_ptr_va_arg(bx, list, llty, size, align.abi, slot_size, allow_higher_align);
// Indirect: load the pointer out of the slot, then load the value through it.
85 let tmp_ret = bx.load(addr, addr_align);
86 bx.load(tmp_ret, align.abi)
// Direct: load the value straight out of the slot.
88 bx.load(addr, addr_align)
// Entry point for codegenning a `va_arg(list, T)`: dispatches on target
// architecture and Windows-likeness to a hand-rolled pointer-bump
// implementation, falling back to LLVM's native `va_arg` instruction.
//
// NOTE(review): this extraction is missing several lines (some match-arm
// pattern lines and the closing braces past the last visible line); arm
// labels below are hedged where the pattern line is not visible.
92 pub(super) fn emit_va_arg(
93 bx: &mut Builder<'a, 'll, 'tcx>,
94 addr: OperandRef<'tcx, &'ll Value>,
97 // Determine the va_arg implementation to use. The LLVM va_arg instruction
98 // is lacking in some instances, so we should only use it as a fallback.
99 let target = &bx.cx.tcx.sess.target.target;
100 let arch = &bx.cx.tcx.sess.target.target.arch;
101 match (&**arch, target.options.is_like_windows) {
// Presumably the x86/Windows arm (pattern line missing): 4-byte slots, no
// over-alignment allowed — TODO confirm.
104 emit_ptr_va_arg(bx, addr, target_ty, false, Align::from_bytes(4).unwrap(), false)
// Presumably the generic x86 arm (pattern line missing): 4-byte slots,
// over-alignment allowed — TODO confirm.
108 emit_ptr_va_arg(bx, addr, target_ty, false, Align::from_bytes(4).unwrap(), true)
// Windows AArch64: 8-byte slots, no over-alignment.
111 ("aarch64", true) => {
112 emit_ptr_va_arg(bx, addr, target_ty, false, Align::from_bytes(8).unwrap(), false)
// iOS AArch64: 8-byte slots, over-alignment allowed.
115 ("aarch64", _) if target.target_os == "ios" => {
116 emit_ptr_va_arg(bx, addr, target_ty, false, Align::from_bytes(8).unwrap(), true)
// Windows x86_64: values larger than 8 bytes or with a non-power-of-two
// size are passed indirectly (by pointer) per the Windows x64 varargs ABI.
119 ("x86_64", true) => {
120 let target_ty_size = bx.cx.size_of(target_ty).bytes();
121 let indirect: bool = target_ty_size > 8 || !target_ty_size.is_power_of_two();
122 emit_ptr_va_arg(bx, addr, target_ty, indirect, Align::from_bytes(8).unwrap(), false)
124 // For all other architecture/OS combinations fall back to using
125 // the LLVM va_arg instruction.
126 // https://llvm.org/docs/LangRef.html#va-arg-instruction
127 _ => bx.va_arg(addr.immediate(), bx.cx.layout_of(target_ty).llvm_type(bx.cx)),