use crate::builder::Builder;
use crate::type_::Type;
use crate::type_of::LayoutLlvmExt;
use crate::value::Value;
use rustc::ty::layout::{Align, HasDataLayout, HasTyCtxt, LayoutOf, Size};
use rustc::ty::Ty;
use rustc_codegen_ssa::mir::operand::OperandRef;
use rustc_codegen_ssa::traits::{
    BaseTypeMethods, BuilderMethods, ConstMethods, DerivedTypeMethods,
};
13 fn round_pointer_up_to_alignment(
14 bx: &mut Builder<'a, 'll, 'tcx>,
19 let mut ptr_as_int = bx.ptrtoint(addr, bx.cx().type_isize());
20 ptr_as_int = bx.add(ptr_as_int, bx.cx().const_i32(align.bytes() as i32 - 1));
21 ptr_as_int = bx.and(ptr_as_int, bx.cx().const_i32(-(align.bytes() as i32)));
22 bx.inttoptr(ptr_as_int, ptr_ty)
25 fn emit_direct_ptr_va_arg(
26 bx: &mut Builder<'a, 'll, 'tcx>,
27 list: OperandRef<'tcx, &'ll Value>,
32 allow_higher_align: bool,
33 ) -> (&'ll Value, Align) {
34 let va_list_ptr_ty = bx.cx().type_ptr_to(bx.cx.type_i8p());
35 let va_list_addr = if list.layout.llvm_type(bx.cx) != va_list_ptr_ty {
36 bx.bitcast(list.immediate(), va_list_ptr_ty)
41 let ptr = bx.load(va_list_addr, bx.tcx().data_layout.pointer_align.abi);
43 let (addr, addr_align) = if allow_higher_align && align > slot_size {
44 (round_pointer_up_to_alignment(bx, ptr, align, bx.cx().type_i8p()), align)
49 let aligned_size = size.align_to(slot_size).bytes() as i32;
50 let full_direct_size = bx.cx().const_i32(aligned_size);
51 let next = bx.inbounds_gep(addr, &[full_direct_size]);
52 bx.store(next, va_list_addr, bx.tcx().data_layout.pointer_align.abi);
54 if size.bytes() < slot_size.bytes() && &*bx.tcx().sess.target.target.target_endian == "big" {
55 let adjusted_size = bx.cx().const_i32((slot_size.bytes() - size.bytes()) as i32);
56 let adjusted = bx.inbounds_gep(addr, &[adjusted_size]);
57 (bx.bitcast(adjusted, bx.cx().type_ptr_to(llty)), addr_align)
59 (bx.bitcast(addr, bx.cx().type_ptr_to(llty)), addr_align)
64 bx: &mut Builder<'a, 'll, 'tcx>,
65 list: OperandRef<'tcx, &'ll Value>,
69 allow_higher_align: bool,
71 let layout = bx.cx.layout_of(target_ty);
72 let (llty, size, align) = if indirect {
74 bx.cx.layout_of(bx.cx.tcx.mk_imm_ptr(target_ty)).llvm_type(bx.cx),
75 bx.cx.data_layout().pointer_size,
76 bx.cx.data_layout().pointer_align,
79 (layout.llvm_type(bx.cx), layout.size, layout.align)
81 let (addr, addr_align) =
82 emit_direct_ptr_va_arg(bx, list, llty, size, align.abi, slot_size, allow_higher_align);
84 let tmp_ret = bx.load(addr, addr_align);
85 bx.load(tmp_ret, align.abi)
87 bx.load(addr, addr_align)
91 pub(super) fn emit_va_arg(
92 bx: &mut Builder<'a, 'll, 'tcx>,
93 addr: OperandRef<'tcx, &'ll Value>,
96 // Determine the va_arg implementation to use. The LLVM va_arg instruction
97 // is lacking in some instances, so we should only use it as a fallback.
98 let target = &bx.cx.tcx.sess.target.target;
99 let arch = &bx.cx.tcx.sess.target.target.arch;
100 match (&**arch, target.options.is_like_windows) {
103 emit_ptr_va_arg(bx, addr, target_ty, false, Align::from_bytes(4).unwrap(), false)
107 emit_ptr_va_arg(bx, addr, target_ty, false, Align::from_bytes(4).unwrap(), true)
110 ("aarch64", true) => {
111 emit_ptr_va_arg(bx, addr, target_ty, false, Align::from_bytes(8).unwrap(), false)
114 ("aarch64", _) if target.target_os == "ios" => {
115 emit_ptr_va_arg(bx, addr, target_ty, false, Align::from_bytes(8).unwrap(), true)
118 ("x86_64", true) => {
119 let target_ty_size = bx.cx.size_of(target_ty).bytes();
120 let indirect: bool = target_ty_size > 8 || !target_ty_size.is_power_of_two();
121 emit_ptr_va_arg(bx, addr, target_ty, indirect, Align::from_bytes(8).unwrap(), false)
123 // For all other architecture/OS combinations fall back to using
124 // the LLVM va_arg instruction.
125 // https://llvm.org/docs/LangRef.html#va-arg-instruction
126 _ => bx.va_arg(addr.immediate(), bx.cx.layout_of(target_ty).llvm_type(bx.cx)),