This file is part of Valgrind, a dynamic binary instrumentation
framework.
- Copyright (C) 2000-2011 Julian Seward. All rights reserved.
+ Copyright (C) 2000-2013 Julian Seward. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
|| (__VALGRIND_MAJOR__ == 3 && __VALGRIND_MINOR__ >= 6))
*/
#define __VALGRIND_MAJOR__ 3
-#define __VALGRIND_MINOR__ 6
+#define __VALGRIND_MINOR__ 8
#include <stdarg.h>
#undef PLAT_ppc64_linux
#undef PLAT_arm_linux
#undef PLAT_s390x_linux
+#undef PLAT_mips32_linux
+#undef PLAT_mips64_linux
#if defined(__APPLE__) && defined(__i386__)
# define PLAT_amd64_darwin 1
#elif defined(__MINGW32__) || defined(__CYGWIN32__) \
|| (defined(_WIN32) && defined(_M_IX86))
-# if defined(__x86_64__)
-# define PLAT_amd64_win64 1
-# elif defined(__i386__)
-# define PLAT_x86_win32 1
-# endif
+# define PLAT_x86_win32 1
+#elif defined(__MINGW64__) || (defined(_WIN64) && defined(_M_X64))
+# define PLAT_amd64_win64 1
#elif defined(__linux__) && defined(__i386__)
# define PLAT_x86_linux 1
#elif defined(__linux__) && defined(__x86_64__)
# define PLAT_arm_linux 1
#elif defined(__linux__) && defined(__s390__) && defined(__s390x__)
# define PLAT_s390x_linux 1
+#elif defined(__linux__) && defined(__mips__)
+#if (__mips==64)
+# define PLAT_mips64_linux 1
+#else
+# define PLAT_mips32_linux 1
+#endif
#else
/* If we're not compiling for our target platform, don't generate
any inline asms. */
__SPECIAL_INSTRUCTION_PREAMBLE \
/* call-noredir *%EAX */ \
"xchgl %%edx,%%edx\n\t"
+
+#define VALGRIND_VEX_INJECT_IR() \
+ do { \
+ __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
+ "xchgl %%edi,%%edi\n\t" \
+ : : : "cc", "memory" \
+ ); \
+ } while (0)
+
#endif /* PLAT_x86_linux || PLAT_x86_darwin || (PLAT_x86_win32 && __GNUC__) */
/* ------------------------- x86-Win32 ------------------------- */
#define VALGRIND_CALL_NOREDIR_EAX ERROR
+#define VALGRIND_VEX_INJECT_IR() \
+ do { \
+ __asm { __SPECIAL_INSTRUCTION_PREAMBLE \
+ __asm xchg edi,edi \
+ } \
+ } while (0)
+
#else
#error Unsupported compiler.
#endif
/* ------------------------ amd64-{linux,darwin} --------------- */
-#if defined(PLAT_amd64_linux) || defined(PLAT_amd64_darwin) \
- || defined(PLAT_amd64_win64)
+#if defined(PLAT_amd64_linux) || defined(PLAT_amd64_darwin)
typedef
struct {
__SPECIAL_INSTRUCTION_PREAMBLE \
/* call-noredir *%RAX */ \
"xchgq %%rdx,%%rdx\n\t"
+
+#define VALGRIND_VEX_INJECT_IR() \
+ do { \
+ __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
+ "xchgq %%rdi,%%rdi\n\t" \
+ : : : "cc", "memory" \
+ ); \
+ } while (0)
+
#endif /* PLAT_amd64_linux || PLAT_amd64_darwin */
/* ------------------------ ppc32-linux ------------------------ */
__SPECIAL_INSTRUCTION_PREAMBLE \
/* branch-and-link-to-noredir *%R11 */ \
"or 3,3,3\n\t"
+
+#define VALGRIND_VEX_INJECT_IR() \
+ do { \
+ __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
+ "or 5,5,5\n\t" \
+ ); \
+ } while (0)
+
#endif /* PLAT_ppc32_linux */
/* ------------------------ ppc64-linux ------------------------ */
/* branch-and-link-to-noredir *%R11 */ \
"or 3,3,3\n\t"
+#define VALGRIND_VEX_INJECT_IR() \
+ do { \
+ __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
+ "or 5,5,5\n\t" \
+ ); \
+ } while (0)
+
#endif /* PLAT_ppc64_linux */
/* ------------------------- arm-linux ------------------------- */
/* branch-and-link-to-noredir *%R4 */ \
"orr r12, r12, r12\n\t"
+#define VALGRIND_VEX_INJECT_IR() \
+ do { \
+ __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
+ "orr r9, r9, r9\n\t" \
+ : : : "cc", "memory" \
+ ); \
+ } while (0)
+
#endif /* PLAT_arm_linux */
/* ------------------------ s390x-linux ------------------------ */
#define __CLIENT_REQUEST_CODE "lr 2,2\n\t"
#define __GET_NR_CONTEXT_CODE "lr 3,3\n\t"
#define __CALL_NO_REDIR_CODE "lr 4,4\n\t"
+#define __VEX_INJECT_IR_CODE "lr 5,5\n\t"
#define VALGRIND_DO_CLIENT_REQUEST_EXPR( \
_zzq_default, _zzq_request, \
__SPECIAL_INSTRUCTION_PREAMBLE \
__CALL_NO_REDIR_CODE
+#define VALGRIND_VEX_INJECT_IR() \
+ do { \
+ __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
+ __VEX_INJECT_IR_CODE); \
+ } while (0)
+
#endif /* PLAT_s390x_linux */
+/* ------------------------- mips32-linux ---------------- */
+
+#if defined(PLAT_mips32_linux)
+
+typedef
+ struct {
+ unsigned int nraddr; /* where's the code? */
+ }
+ OrigFn;
+
+/* .word 0x342
+ * .word 0x742
+ * .word 0xC2
+ * .word 0x4C2*/
+#define __SPECIAL_INSTRUCTION_PREAMBLE \
+ "srl $0, $0, 13\n\t" \
+ "srl $0, $0, 29\n\t" \
+ "srl $0, $0, 3\n\t" \
+ "srl $0, $0, 19\n\t"
+
+#define VALGRIND_DO_CLIENT_REQUEST_EXPR( \
+ _zzq_default, _zzq_request, \
+ _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \
+ __extension__ \
+ ({ volatile unsigned int _zzq_args[6]; \
+ volatile unsigned int _zzq_result; \
+ _zzq_args[0] = (unsigned int)(_zzq_request); \
+ _zzq_args[1] = (unsigned int)(_zzq_arg1); \
+ _zzq_args[2] = (unsigned int)(_zzq_arg2); \
+ _zzq_args[3] = (unsigned int)(_zzq_arg3); \
+ _zzq_args[4] = (unsigned int)(_zzq_arg4); \
+ _zzq_args[5] = (unsigned int)(_zzq_arg5); \
+ __asm__ volatile("move $11, %1\n\t" /*default*/ \
+ "move $12, %2\n\t" /*ptr*/ \
+ __SPECIAL_INSTRUCTION_PREAMBLE \
+ /* T3 = client_request ( T4 ) */ \
+ "or $13, $13, $13\n\t" \
+ "move %0, $11\n\t" /*result*/ \
+ : "=r" (_zzq_result) \
+ : "r" (_zzq_default), "r" (&_zzq_args[0]) \
+ : "$11", "$12"); \
+ _zzq_result; \
+ })
+
+#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \
+ { volatile OrigFn* _zzq_orig = &(_zzq_rlval); \
+ volatile unsigned int __addr; \
+ __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
+ /* %t9 = guest_NRADDR */ \
+ "or $14, $14, $14\n\t" \
+ "move %0, $11" /*result*/ \
+ : "=r" (__addr) \
+ : \
+ : "$11" \
+ ); \
+ _zzq_orig->nraddr = __addr; \
+ }
+
+#define VALGRIND_CALL_NOREDIR_T9 \
+ __SPECIAL_INSTRUCTION_PREAMBLE \
+ /* call-noredir *%t9 */ \
+ "or $15, $15, $15\n\t"
+
+#define VALGRIND_VEX_INJECT_IR() \
+ do { \
+ __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
+ "or $11, $11, $11\n\t" \
+ ); \
+ } while (0)
+

+#endif /* PLAT_mips32_linux */
+
+/* ------------------------- mips64-linux ---------------- */
+
+#if defined(PLAT_mips64_linux)
+
+typedef
+ struct {
+ unsigned long long nraddr; /* where's the code? */
+ }
+ OrigFn;
+
+/* dsll $0,$0, 3
+ * dsll $0,$0, 13
+ * dsll $0,$0, 29
+ * dsll $0,$0, 19*/
+#define __SPECIAL_INSTRUCTION_PREAMBLE \
+ "dsll $0,$0, 3 ; dsll $0,$0,13\n\t" \
+ "dsll $0,$0,29 ; dsll $0,$0,19\n\t"
+
+#define VALGRIND_DO_CLIENT_REQUEST_EXPR( \
+ _zzq_default, _zzq_request, \
+ _zzq_arg1, _zzq_arg2, _zzq_arg3, _zzq_arg4, _zzq_arg5) \
+ __extension__ \
+ ({ volatile unsigned long long int _zzq_args[6]; \
+ volatile unsigned long long int _zzq_result; \
+ _zzq_args[0] = (unsigned long long int)(_zzq_request); \
+ _zzq_args[1] = (unsigned long long int)(_zzq_arg1); \
+ _zzq_args[2] = (unsigned long long int)(_zzq_arg2); \
+ _zzq_args[3] = (unsigned long long int)(_zzq_arg3); \
+ _zzq_args[4] = (unsigned long long int)(_zzq_arg4); \
+ _zzq_args[5] = (unsigned long long int)(_zzq_arg5); \
+ __asm__ volatile("move $11, %1\n\t" /*default*/ \
+ "move $12, %2\n\t" /*ptr*/ \
+ __SPECIAL_INSTRUCTION_PREAMBLE \
+ /* $11 = client_request ( $12 ) */ \
+ "or $13, $13, $13\n\t" \
+ "move %0, $11\n\t" /*result*/ \
+ : "=r" (_zzq_result) \
+ : "r" (_zzq_default), "r" (&_zzq_args[0]) \
+ : "$11", "$12"); \
+ _zzq_result; \
+ })
+
+#define VALGRIND_GET_NR_CONTEXT(_zzq_rlval) \
+ { volatile OrigFn* _zzq_orig = &(_zzq_rlval); \
+ volatile unsigned long long int __addr; \
+ __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
+ /* $11 = guest_NRADDR */ \
+ "or $14, $14, $14\n\t" \
+ "move %0, $11" /*result*/ \
+ : "=r" (__addr) \
+ : \
+ : "$11"); \
+ _zzq_orig->nraddr = __addr; \
+ }
+
+#define VALGRIND_CALL_NOREDIR_T9 \
+ __SPECIAL_INSTRUCTION_PREAMBLE \
+ /* call-noredir $25 */ \
+ "or $15, $15, $15\n\t"
+
+#define VALGRIND_VEX_INJECT_IR() \
+ do { \
+ __asm__ volatile(__SPECIAL_INSTRUCTION_PREAMBLE \
+ "or $11, $11, $11\n\t" \
+ ); \
+ } while (0)
+
+#endif /* PLAT_mips64_linux */
+
/* Insert assembly code for other platforms here... */
#endif /* NVALGRIND */
macros. The type of the argument _lval is OrigFn. */
#define VALGRIND_GET_ORIG_FN(_lval) VALGRIND_GET_NR_CONTEXT(_lval)
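+
+/* For illustration only (the library and function names here are
+ hypothetical): a wrapper for "int foo(int)" exported by some
+ "libfoo.so*" would, roughly, be written as
+
+    int I_WRAP_SONAME_FNNAME_ZU(libfooZdsoZa,foo) ( int x )
+    {
+       int r; OrigFn fn;
+       VALGRIND_GET_ORIG_FN(fn);
+       CALL_FN_W_W(r, fn, x);
+       return r;
+    }
+
+ where "libfooZdsoZa" is the Z-encoded form of the soname
+ "libfoo.so*". */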
+/* Also provide end-user facilities for function replacement, rather
+ than wrapping. A replacement function differs from a wrapper in
+ that it has no way to get hold of the original function being
+ called, and hence no way to call onwards to it. In a replacement
+ function, VALGRIND_GET_ORIG_FN always returns zero. */
+
+#define I_REPLACE_SONAME_FNNAME_ZU(soname,fnname) \
+ VG_CONCAT4(_vgr00000ZU_,soname,_,fnname)
+
+#define I_REPLACE_SONAME_FNNAME_ZZ(soname,fnname) \
+ VG_CONCAT4(_vgr00000ZZ_,soname,_,fnname)
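+
+/* An illustrative sketch: a replacement (as opposed to a wrapper)
+ for "free" in any object whose soname matches "libc.so*" could be
+ named using the first of these macros:
+
+    void I_REPLACE_SONAME_FNNAME_ZU(libcZdsoZa,free) ( void* p )
+    {
+       ... no way to reach the original free() from here ...
+    }
+
+ Here "libcZdsoZa" is the Z-encoded soname "libc.so*". */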
+
/* Derivatives of the main macros below, for calling functions
returning void. */
as gcc can already see that, plus causes gcc to bomb. */
#define __CALLER_SAVED_REGS /*"eax"*/ "ecx", "edx"
+/* Macros to save and align the stack before making a function
+ call and restore it afterwards, as gcc may not keep the stack
+ pointer aligned if it doesn't realise calls are being made
+ to other functions. */
+
+#define VALGRIND_ALIGN_STACK \
+ "movl %%esp,%%edi\n\t" \
+ "andl $0xfffffff0,%%esp\n\t"
+#define VALGRIND_RESTORE_STACK \
+ "movl %%edi,%%esp\n\t"
+
/* These CALL_FN_ macros assume that on x86-linux, sizeof(unsigned
long) == 4. */
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
__asm__ volatile( \
+ VALGRIND_ALIGN_STACK \
"movl (%%eax), %%eax\n\t" /* target->%eax */ \
VALGRIND_CALL_NOREDIR_EAX \
+ VALGRIND_RESTORE_STACK \
: /*out*/ "=a" (_res) \
: /*in*/ "a" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)(arg1); \
__asm__ volatile( \
+ VALGRIND_ALIGN_STACK \
"subl $12, %%esp\n\t" \
"pushl 4(%%eax)\n\t" \
"movl (%%eax), %%eax\n\t" /* target->%eax */ \
VALGRIND_CALL_NOREDIR_EAX \
- "addl $16, %%esp\n" \
+ VALGRIND_RESTORE_STACK \
: /*out*/ "=a" (_res) \
: /*in*/ "a" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
_argvec[1] = (unsigned long)(arg1); \
_argvec[2] = (unsigned long)(arg2); \
__asm__ volatile( \
+ VALGRIND_ALIGN_STACK \
"subl $8, %%esp\n\t" \
"pushl 8(%%eax)\n\t" \
"pushl 4(%%eax)\n\t" \
"movl (%%eax), %%eax\n\t" /* target->%eax */ \
VALGRIND_CALL_NOREDIR_EAX \
- "addl $16, %%esp\n" \
+ VALGRIND_RESTORE_STACK \
: /*out*/ "=a" (_res) \
: /*in*/ "a" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
_argvec[2] = (unsigned long)(arg2); \
_argvec[3] = (unsigned long)(arg3); \
__asm__ volatile( \
+ VALGRIND_ALIGN_STACK \
"subl $4, %%esp\n\t" \
"pushl 12(%%eax)\n\t" \
"pushl 8(%%eax)\n\t" \
"pushl 4(%%eax)\n\t" \
"movl (%%eax), %%eax\n\t" /* target->%eax */ \
VALGRIND_CALL_NOREDIR_EAX \
- "addl $16, %%esp\n" \
+ VALGRIND_RESTORE_STACK \
: /*out*/ "=a" (_res) \
: /*in*/ "a" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
_argvec[3] = (unsigned long)(arg3); \
_argvec[4] = (unsigned long)(arg4); \
__asm__ volatile( \
+ VALGRIND_ALIGN_STACK \
"pushl 16(%%eax)\n\t" \
"pushl 12(%%eax)\n\t" \
"pushl 8(%%eax)\n\t" \
"pushl 4(%%eax)\n\t" \
"movl (%%eax), %%eax\n\t" /* target->%eax */ \
VALGRIND_CALL_NOREDIR_EAX \
- "addl $16, %%esp\n" \
+ VALGRIND_RESTORE_STACK \
: /*out*/ "=a" (_res) \
: /*in*/ "a" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
_argvec[4] = (unsigned long)(arg4); \
_argvec[5] = (unsigned long)(arg5); \
__asm__ volatile( \
+ VALGRIND_ALIGN_STACK \
"subl $12, %%esp\n\t" \
"pushl 20(%%eax)\n\t" \
"pushl 16(%%eax)\n\t" \
"pushl 4(%%eax)\n\t" \
"movl (%%eax), %%eax\n\t" /* target->%eax */ \
VALGRIND_CALL_NOREDIR_EAX \
- "addl $32, %%esp\n" \
+ VALGRIND_RESTORE_STACK \
: /*out*/ "=a" (_res) \
: /*in*/ "a" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
_argvec[5] = (unsigned long)(arg5); \
_argvec[6] = (unsigned long)(arg6); \
__asm__ volatile( \
+ VALGRIND_ALIGN_STACK \
"subl $8, %%esp\n\t" \
"pushl 24(%%eax)\n\t" \
"pushl 20(%%eax)\n\t" \
"pushl 4(%%eax)\n\t" \
"movl (%%eax), %%eax\n\t" /* target->%eax */ \
VALGRIND_CALL_NOREDIR_EAX \
- "addl $32, %%esp\n" \
+ VALGRIND_RESTORE_STACK \
: /*out*/ "=a" (_res) \
: /*in*/ "a" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
_argvec[6] = (unsigned long)(arg6); \
_argvec[7] = (unsigned long)(arg7); \
__asm__ volatile( \
+ VALGRIND_ALIGN_STACK \
"subl $4, %%esp\n\t" \
"pushl 28(%%eax)\n\t" \
"pushl 24(%%eax)\n\t" \
"pushl 4(%%eax)\n\t" \
"movl (%%eax), %%eax\n\t" /* target->%eax */ \
VALGRIND_CALL_NOREDIR_EAX \
- "addl $32, %%esp\n" \
+ VALGRIND_RESTORE_STACK \
: /*out*/ "=a" (_res) \
: /*in*/ "a" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
_argvec[7] = (unsigned long)(arg7); \
_argvec[8] = (unsigned long)(arg8); \
__asm__ volatile( \
+ VALGRIND_ALIGN_STACK \
"pushl 32(%%eax)\n\t" \
"pushl 28(%%eax)\n\t" \
"pushl 24(%%eax)\n\t" \
"pushl 4(%%eax)\n\t" \
"movl (%%eax), %%eax\n\t" /* target->%eax */ \
VALGRIND_CALL_NOREDIR_EAX \
- "addl $32, %%esp\n" \
+ VALGRIND_RESTORE_STACK \
: /*out*/ "=a" (_res) \
: /*in*/ "a" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
_argvec[8] = (unsigned long)(arg8); \
_argvec[9] = (unsigned long)(arg9); \
__asm__ volatile( \
+ VALGRIND_ALIGN_STACK \
"subl $12, %%esp\n\t" \
"pushl 36(%%eax)\n\t" \
"pushl 32(%%eax)\n\t" \
"pushl 4(%%eax)\n\t" \
"movl (%%eax), %%eax\n\t" /* target->%eax */ \
VALGRIND_CALL_NOREDIR_EAX \
- "addl $48, %%esp\n" \
+ VALGRIND_RESTORE_STACK \
: /*out*/ "=a" (_res) \
: /*in*/ "a" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
_argvec[9] = (unsigned long)(arg9); \
_argvec[10] = (unsigned long)(arg10); \
__asm__ volatile( \
+ VALGRIND_ALIGN_STACK \
"subl $8, %%esp\n\t" \
"pushl 40(%%eax)\n\t" \
"pushl 36(%%eax)\n\t" \
"pushl 4(%%eax)\n\t" \
"movl (%%eax), %%eax\n\t" /* target->%eax */ \
VALGRIND_CALL_NOREDIR_EAX \
- "addl $48, %%esp\n" \
+ VALGRIND_RESTORE_STACK \
: /*out*/ "=a" (_res) \
: /*in*/ "a" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
_argvec[10] = (unsigned long)(arg10); \
_argvec[11] = (unsigned long)(arg11); \
__asm__ volatile( \
+ VALGRIND_ALIGN_STACK \
"subl $4, %%esp\n\t" \
"pushl 44(%%eax)\n\t" \
"pushl 40(%%eax)\n\t" \
"pushl 4(%%eax)\n\t" \
"movl (%%eax), %%eax\n\t" /* target->%eax */ \
VALGRIND_CALL_NOREDIR_EAX \
- "addl $48, %%esp\n" \
+ VALGRIND_RESTORE_STACK \
: /*out*/ "=a" (_res) \
: /*in*/ "a" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
_argvec[11] = (unsigned long)(arg11); \
_argvec[12] = (unsigned long)(arg12); \
__asm__ volatile( \
+ VALGRIND_ALIGN_STACK \
"pushl 48(%%eax)\n\t" \
"pushl 44(%%eax)\n\t" \
"pushl 40(%%eax)\n\t" \
"pushl 4(%%eax)\n\t" \
"movl (%%eax), %%eax\n\t" /* target->%eax */ \
VALGRIND_CALL_NOREDIR_EAX \
- "addl $48, %%esp\n" \
+ VALGRIND_RESTORE_STACK \
: /*out*/ "=a" (_res) \
: /*in*/ "a" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "edi" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
# define VALGRIND_CFI_EPILOGUE
#endif
+/* Macros to save and align the stack before making a function
+ call and restore it afterwards, as gcc may not keep the stack
+ pointer aligned if it doesn't realise calls are being made
+ to other functions. */
+
+#define VALGRIND_ALIGN_STACK \
+ "movq %%rsp,%%r14\n\t" \
+ "andq $0xfffffffffffffff0,%%rsp\n\t"
+#define VALGRIND_RESTORE_STACK \
+ "movq %%r14,%%rsp\n\t"
/* These CALL_FN_ macros assume that on amd64-linux, sizeof(unsigned
long) == 8. */
with the stack pointer doesn't give a danger of non-unwindable
stack. */
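+
+/* The "subq $128,%%rsp" in each macro below steps %rsp over the
+ 128-byte red zone that the amd64 ELF ABI lets code use below the
+ stack pointer, so the hidden call cannot trash it;
+ VALGRIND_RESTORE_STACK then puts %rsp back in a single move. */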
+#define CALL_FN_W_v(lval, orig) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[1]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ __asm__ volatile( \
+ VALGRIND_CFI_PROLOGUE \
+ VALGRIND_ALIGN_STACK \
+ "subq $128,%%rsp\n\t" \
+ "movq (%%rax), %%rax\n\t" /* target->%rax */ \
+ VALGRIND_CALL_NOREDIR_RAX \
+ VALGRIND_RESTORE_STACK \
+ VALGRIND_CFI_EPILOGUE \
+ : /*out*/ "=a" (_res) \
+ : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_W(lval, orig, arg1) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[2]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ __asm__ volatile( \
+ VALGRIND_CFI_PROLOGUE \
+ VALGRIND_ALIGN_STACK \
+ "subq $128,%%rsp\n\t" \
+ "movq 8(%%rax), %%rdi\n\t" \
+ "movq (%%rax), %%rax\n\t" /* target->%rax */ \
+ VALGRIND_CALL_NOREDIR_RAX \
+ VALGRIND_RESTORE_STACK \
+ VALGRIND_CFI_EPILOGUE \
+ : /*out*/ "=a" (_res) \
+ : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_WW(lval, orig, arg1,arg2) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ __asm__ volatile( \
+ VALGRIND_CFI_PROLOGUE \
+ VALGRIND_ALIGN_STACK \
+ "subq $128,%%rsp\n\t" \
+ "movq 16(%%rax), %%rsi\n\t" \
+ "movq 8(%%rax), %%rdi\n\t" \
+ "movq (%%rax), %%rax\n\t" /* target->%rax */ \
+ VALGRIND_CALL_NOREDIR_RAX \
+ VALGRIND_RESTORE_STACK \
+ VALGRIND_CFI_EPILOGUE \
+ : /*out*/ "=a" (_res) \
+ : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[4]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ __asm__ volatile( \
+ VALGRIND_CFI_PROLOGUE \
+ VALGRIND_ALIGN_STACK \
+ "subq $128,%%rsp\n\t" \
+ "movq 24(%%rax), %%rdx\n\t" \
+ "movq 16(%%rax), %%rsi\n\t" \
+ "movq 8(%%rax), %%rdi\n\t" \
+ "movq (%%rax), %%rax\n\t" /* target->%rax */ \
+ VALGRIND_CALL_NOREDIR_RAX \
+ VALGRIND_RESTORE_STACK \
+ VALGRIND_CFI_EPILOGUE \
+ : /*out*/ "=a" (_res) \
+ : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[5]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ __asm__ volatile( \
+ VALGRIND_CFI_PROLOGUE \
+ VALGRIND_ALIGN_STACK \
+ "subq $128,%%rsp\n\t" \
+ "movq 32(%%rax), %%rcx\n\t" \
+ "movq 24(%%rax), %%rdx\n\t" \
+ "movq 16(%%rax), %%rsi\n\t" \
+ "movq 8(%%rax), %%rdi\n\t" \
+ "movq (%%rax), %%rax\n\t" /* target->%rax */ \
+ VALGRIND_CALL_NOREDIR_RAX \
+ VALGRIND_RESTORE_STACK \
+ VALGRIND_CFI_EPILOGUE \
+ : /*out*/ "=a" (_res) \
+ : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[6]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ __asm__ volatile( \
+ VALGRIND_CFI_PROLOGUE \
+ VALGRIND_ALIGN_STACK \
+ "subq $128,%%rsp\n\t" \
+ "movq 40(%%rax), %%r8\n\t" \
+ "movq 32(%%rax), %%rcx\n\t" \
+ "movq 24(%%rax), %%rdx\n\t" \
+ "movq 16(%%rax), %%rsi\n\t" \
+ "movq 8(%%rax), %%rdi\n\t" \
+ "movq (%%rax), %%rax\n\t" /* target->%rax */ \
+ VALGRIND_CALL_NOREDIR_RAX \
+ VALGRIND_RESTORE_STACK \
+ VALGRIND_CFI_EPILOGUE \
+ : /*out*/ "=a" (_res) \
+ : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[7]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ _argvec[6] = (unsigned long)(arg6); \
+ __asm__ volatile( \
+ VALGRIND_CFI_PROLOGUE \
+ VALGRIND_ALIGN_STACK \
+ "subq $128,%%rsp\n\t" \
+ "movq 48(%%rax), %%r9\n\t" \
+ "movq 40(%%rax), %%r8\n\t" \
+ "movq 32(%%rax), %%rcx\n\t" \
+ "movq 24(%%rax), %%rdx\n\t" \
+ "movq 16(%%rax), %%rsi\n\t" \
+ "movq 8(%%rax), %%rdi\n\t" \
+ "movq (%%rax), %%rax\n\t" /* target->%rax */ \
+ VALGRIND_CALL_NOREDIR_RAX \
+ VALGRIND_RESTORE_STACK \
+ VALGRIND_CFI_EPILOGUE \
+ : /*out*/ "=a" (_res) \
+ : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[8]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ _argvec[6] = (unsigned long)(arg6); \
+ _argvec[7] = (unsigned long)(arg7); \
+ __asm__ volatile( \
+ VALGRIND_CFI_PROLOGUE \
+ VALGRIND_ALIGN_STACK \
+ "subq $136,%%rsp\n\t" \
+ "pushq 56(%%rax)\n\t" \
+ "movq 48(%%rax), %%r9\n\t" \
+ "movq 40(%%rax), %%r8\n\t" \
+ "movq 32(%%rax), %%rcx\n\t" \
+ "movq 24(%%rax), %%rdx\n\t" \
+ "movq 16(%%rax), %%rsi\n\t" \
+ "movq 8(%%rax), %%rdi\n\t" \
+ "movq (%%rax), %%rax\n\t" /* target->%rax */ \
+ VALGRIND_CALL_NOREDIR_RAX \
+ VALGRIND_RESTORE_STACK \
+ VALGRIND_CFI_EPILOGUE \
+ : /*out*/ "=a" (_res) \
+ : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7,arg8) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[9]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ _argvec[6] = (unsigned long)(arg6); \
+ _argvec[7] = (unsigned long)(arg7); \
+ _argvec[8] = (unsigned long)(arg8); \
+ __asm__ volatile( \
+ VALGRIND_CFI_PROLOGUE \
+ VALGRIND_ALIGN_STACK \
+ "subq $128,%%rsp\n\t" \
+ "pushq 64(%%rax)\n\t" \
+ "pushq 56(%%rax)\n\t" \
+ "movq 48(%%rax), %%r9\n\t" \
+ "movq 40(%%rax), %%r8\n\t" \
+ "movq 32(%%rax), %%rcx\n\t" \
+ "movq 24(%%rax), %%rdx\n\t" \
+ "movq 16(%%rax), %%rsi\n\t" \
+ "movq 8(%%rax), %%rdi\n\t" \
+ "movq (%%rax), %%rax\n\t" /* target->%rax */ \
+ VALGRIND_CALL_NOREDIR_RAX \
+ VALGRIND_RESTORE_STACK \
+ VALGRIND_CFI_EPILOGUE \
+ : /*out*/ "=a" (_res) \
+ : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7,arg8,arg9) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[10]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ _argvec[6] = (unsigned long)(arg6); \
+ _argvec[7] = (unsigned long)(arg7); \
+ _argvec[8] = (unsigned long)(arg8); \
+ _argvec[9] = (unsigned long)(arg9); \
+ __asm__ volatile( \
+ VALGRIND_CFI_PROLOGUE \
+ VALGRIND_ALIGN_STACK \
+ "subq $136,%%rsp\n\t" \
+ "pushq 72(%%rax)\n\t" \
+ "pushq 64(%%rax)\n\t" \
+ "pushq 56(%%rax)\n\t" \
+ "movq 48(%%rax), %%r9\n\t" \
+ "movq 40(%%rax), %%r8\n\t" \
+ "movq 32(%%rax), %%rcx\n\t" \
+ "movq 24(%%rax), %%rdx\n\t" \
+ "movq 16(%%rax), %%rsi\n\t" \
+ "movq 8(%%rax), %%rdi\n\t" \
+ "movq (%%rax), %%rax\n\t" /* target->%rax */ \
+ VALGRIND_CALL_NOREDIR_RAX \
+ VALGRIND_RESTORE_STACK \
+ VALGRIND_CFI_EPILOGUE \
+ : /*out*/ "=a" (_res) \
+ : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7,arg8,arg9,arg10) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[11]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ _argvec[6] = (unsigned long)(arg6); \
+ _argvec[7] = (unsigned long)(arg7); \
+ _argvec[8] = (unsigned long)(arg8); \
+ _argvec[9] = (unsigned long)(arg9); \
+ _argvec[10] = (unsigned long)(arg10); \
+ __asm__ volatile( \
+ VALGRIND_CFI_PROLOGUE \
+ VALGRIND_ALIGN_STACK \
+ "subq $128,%%rsp\n\t" \
+ "pushq 80(%%rax)\n\t" \
+ "pushq 72(%%rax)\n\t" \
+ "pushq 64(%%rax)\n\t" \
+ "pushq 56(%%rax)\n\t" \
+ "movq 48(%%rax), %%r9\n\t" \
+ "movq 40(%%rax), %%r8\n\t" \
+ "movq 32(%%rax), %%rcx\n\t" \
+ "movq 24(%%rax), %%rdx\n\t" \
+ "movq 16(%%rax), %%rsi\n\t" \
+ "movq 8(%%rax), %%rdi\n\t" \
+ "movq (%%rax), %%rax\n\t" /* target->%rax */ \
+ VALGRIND_CALL_NOREDIR_RAX \
+ VALGRIND_RESTORE_STACK \
+ VALGRIND_CFI_EPILOGUE \
+ : /*out*/ "=a" (_res) \
+ : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7,arg8,arg9,arg10,arg11) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[12]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ _argvec[6] = (unsigned long)(arg6); \
+ _argvec[7] = (unsigned long)(arg7); \
+ _argvec[8] = (unsigned long)(arg8); \
+ _argvec[9] = (unsigned long)(arg9); \
+ _argvec[10] = (unsigned long)(arg10); \
+ _argvec[11] = (unsigned long)(arg11); \
+ __asm__ volatile( \
+ VALGRIND_CFI_PROLOGUE \
+ VALGRIND_ALIGN_STACK \
+ "subq $136,%%rsp\n\t" \
+ "pushq 88(%%rax)\n\t" \
+ "pushq 80(%%rax)\n\t" \
+ "pushq 72(%%rax)\n\t" \
+ "pushq 64(%%rax)\n\t" \
+ "pushq 56(%%rax)\n\t" \
+ "movq 48(%%rax), %%r9\n\t" \
+ "movq 40(%%rax), %%r8\n\t" \
+ "movq 32(%%rax), %%rcx\n\t" \
+ "movq 24(%%rax), %%rdx\n\t" \
+ "movq 16(%%rax), %%rsi\n\t" \
+ "movq 8(%%rax), %%rdi\n\t" \
+ "movq (%%rax), %%rax\n\t" /* target->%rax */ \
+ VALGRIND_CALL_NOREDIR_RAX \
+ VALGRIND_RESTORE_STACK \
+ VALGRIND_CFI_EPILOGUE \
+ : /*out*/ "=a" (_res) \
+ : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7,arg8,arg9,arg10,arg11,arg12) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[13]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ _argvec[6] = (unsigned long)(arg6); \
+ _argvec[7] = (unsigned long)(arg7); \
+ _argvec[8] = (unsigned long)(arg8); \
+ _argvec[9] = (unsigned long)(arg9); \
+ _argvec[10] = (unsigned long)(arg10); \
+ _argvec[11] = (unsigned long)(arg11); \
+ _argvec[12] = (unsigned long)(arg12); \
+ __asm__ volatile( \
+ VALGRIND_CFI_PROLOGUE \
+ VALGRIND_ALIGN_STACK \
+ "subq $128,%%rsp\n\t" \
+ "pushq 96(%%rax)\n\t" \
+ "pushq 88(%%rax)\n\t" \
+ "pushq 80(%%rax)\n\t" \
+ "pushq 72(%%rax)\n\t" \
+ "pushq 64(%%rax)\n\t" \
+ "pushq 56(%%rax)\n\t" \
+ "movq 48(%%rax), %%r9\n\t" \
+ "movq 40(%%rax), %%r8\n\t" \
+ "movq 32(%%rax), %%rcx\n\t" \
+ "movq 24(%%rax), %%rdx\n\t" \
+ "movq 16(%%rax), %%rsi\n\t" \
+ "movq 8(%%rax), %%rdi\n\t" \
+ "movq (%%rax), %%rax\n\t" /* target->%rax */ \
+ VALGRIND_CALL_NOREDIR_RAX \
+ VALGRIND_RESTORE_STACK \
+ VALGRIND_CFI_EPILOGUE \
+ : /*out*/ "=a" (_res) \
+ : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r14", "r15" \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#endif /* PLAT_amd64_linux || PLAT_amd64_darwin */
+
+/* ------------------------ ppc32-linux ------------------------ */
+
+#if defined(PLAT_ppc32_linux)
+
+/* This is useful for finding out about the on-stack stuff:
+
+ extern int f9 ( int,int,int,int,int,int,int,int,int );
+ extern int f10 ( int,int,int,int,int,int,int,int,int,int );
+ extern int f11 ( int,int,int,int,int,int,int,int,int,int,int );
+ extern int f12 ( int,int,int,int,int,int,int,int,int,int,int,int );
+
+ int g9 ( void ) {
+ return f9(11,22,33,44,55,66,77,88,99);
+ }
+ int g10 ( void ) {
+ return f10(11,22,33,44,55,66,77,88,99,110);
+ }
+ int g11 ( void ) {
+ return f11(11,22,33,44,55,66,77,88,99,110,121);
+ }
+ int g12 ( void ) {
+ return f12(11,22,33,44,55,66,77,88,99,110,121,132);
+ }
+*/
+
+/* ARGREGS: r3 r4 r5 r6 r7 r8 r9 r10 (the rest on stack somewhere) */
+
+/* These regs are trashed by the hidden call. */
+#define __CALLER_SAVED_REGS \
+ "lr", "ctr", "xer", \
+ "cr0", "cr1", "cr2", "cr3", "cr4", "cr5", "cr6", "cr7", \
+ "r0", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10", \
+ "r11", "r12", "r13"
+
+/* Macros to save and align the stack before making a function
+ call and restore it afterwards, as gcc may not keep the stack
+ pointer aligned if it doesn't realise calls are being made
+ to other functions. */
+
+#define VALGRIND_ALIGN_STACK \
+ "mr 28,1\n\t" \
+ "rlwinm 1,1,0,0,27\n\t"
+#define VALGRIND_RESTORE_STACK \
+ "mr 1,28\n\t"
+
+/* These CALL_FN_ macros assume that on ppc32-linux,
+ sizeof(unsigned long) == 4. */
+
#define CALL_FN_W_v(lval, orig) \
do { \
volatile OrigFn _orig = (orig); \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
__asm__ volatile( \
- VALGRIND_CFI_PROLOGUE \
- "subq $128,%%rsp\n\t" \
- "movq (%%rax), %%rax\n\t" /* target->%rax */ \
- VALGRIND_CALL_NOREDIR_RAX \
- "addq $128,%%rsp\n\t" \
- VALGRIND_CFI_EPILOGUE \
- : /*out*/ "=a" (_res) \
- : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r15" \
+ VALGRIND_ALIGN_STACK \
+ "mr 11,%1\n\t" \
+ "lwz 11,0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ VALGRIND_RESTORE_STACK \
+ "mr %0,3" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
volatile unsigned long _argvec[2]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
+ _argvec[1] = (unsigned long)arg1; \
__asm__ volatile( \
- VALGRIND_CFI_PROLOGUE \
- "subq $128,%%rsp\n\t" \
- "movq 8(%%rax), %%rdi\n\t" \
- "movq (%%rax), %%rax\n\t" /* target->%rax */ \
- VALGRIND_CALL_NOREDIR_RAX \
- "addq $128,%%rsp\n\t" \
- VALGRIND_CFI_EPILOGUE \
- : /*out*/ "=a" (_res) \
- : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r15" \
+ VALGRIND_ALIGN_STACK \
+ "mr 11,%1\n\t" \
+ "lwz 3,4(11)\n\t" /* arg1->r3 */ \
+ "lwz 11,0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ VALGRIND_RESTORE_STACK \
+ "mr %0,3" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
volatile unsigned long _argvec[3]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- _argvec[2] = (unsigned long)(arg2); \
+ _argvec[1] = (unsigned long)arg1; \
+ _argvec[2] = (unsigned long)arg2; \
__asm__ volatile( \
- VALGRIND_CFI_PROLOGUE \
- "subq $128,%%rsp\n\t" \
- "movq 16(%%rax), %%rsi\n\t" \
- "movq 8(%%rax), %%rdi\n\t" \
- "movq (%%rax), %%rax\n\t" /* target->%rax */ \
- VALGRIND_CALL_NOREDIR_RAX \
- "addq $128,%%rsp\n\t" \
- VALGRIND_CFI_EPILOGUE \
- : /*out*/ "=a" (_res) \
- : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r15" \
+ VALGRIND_ALIGN_STACK \
+ "mr 11,%1\n\t" \
+ "lwz 3,4(11)\n\t" /* arg1->r3 */ \
+ "lwz 4,8(11)\n\t" \
+ "lwz 11,0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ VALGRIND_RESTORE_STACK \
+ "mr %0,3" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
volatile unsigned long _argvec[4]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- _argvec[2] = (unsigned long)(arg2); \
- _argvec[3] = (unsigned long)(arg3); \
+ _argvec[1] = (unsigned long)arg1; \
+ _argvec[2] = (unsigned long)arg2; \
+ _argvec[3] = (unsigned long)arg3; \
__asm__ volatile( \
- VALGRIND_CFI_PROLOGUE \
- "subq $128,%%rsp\n\t" \
- "movq 24(%%rax), %%rdx\n\t" \
- "movq 16(%%rax), %%rsi\n\t" \
- "movq 8(%%rax), %%rdi\n\t" \
- "movq (%%rax), %%rax\n\t" /* target->%rax */ \
- VALGRIND_CALL_NOREDIR_RAX \
- "addq $128,%%rsp\n\t" \
- VALGRIND_CFI_EPILOGUE \
- : /*out*/ "=a" (_res) \
- : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r15" \
+ VALGRIND_ALIGN_STACK \
+ "mr 11,%1\n\t" \
+ "lwz 3,4(11)\n\t" /* arg1->r3 */ \
+ "lwz 4,8(11)\n\t" \
+ "lwz 5,12(11)\n\t" \
+ "lwz 11,0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ VALGRIND_RESTORE_STACK \
+ "mr %0,3" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
volatile unsigned long _argvec[5]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- _argvec[2] = (unsigned long)(arg2); \
- _argvec[3] = (unsigned long)(arg3); \
- _argvec[4] = (unsigned long)(arg4); \
+ _argvec[1] = (unsigned long)arg1; \
+ _argvec[2] = (unsigned long)arg2; \
+ _argvec[3] = (unsigned long)arg3; \
+ _argvec[4] = (unsigned long)arg4; \
__asm__ volatile( \
- VALGRIND_CFI_PROLOGUE \
- "subq $128,%%rsp\n\t" \
- "movq 32(%%rax), %%rcx\n\t" \
- "movq 24(%%rax), %%rdx\n\t" \
- "movq 16(%%rax), %%rsi\n\t" \
- "movq 8(%%rax), %%rdi\n\t" \
- "movq (%%rax), %%rax\n\t" /* target->%rax */ \
- VALGRIND_CALL_NOREDIR_RAX \
- "addq $128,%%rsp\n\t" \
- VALGRIND_CFI_EPILOGUE \
- : /*out*/ "=a" (_res) \
- : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r15" \
+ VALGRIND_ALIGN_STACK \
+ "mr 11,%1\n\t" \
+ "lwz 3,4(11)\n\t" /* arg1->r3 */ \
+ "lwz 4,8(11)\n\t" \
+ "lwz 5,12(11)\n\t" \
+ "lwz 6,16(11)\n\t" /* arg4->r6 */ \
+ "lwz 11,0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ VALGRIND_RESTORE_STACK \
+ "mr %0,3" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
volatile unsigned long _argvec[6]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- _argvec[2] = (unsigned long)(arg2); \
- _argvec[3] = (unsigned long)(arg3); \
- _argvec[4] = (unsigned long)(arg4); \
- _argvec[5] = (unsigned long)(arg5); \
+ _argvec[1] = (unsigned long)arg1; \
+ _argvec[2] = (unsigned long)arg2; \
+ _argvec[3] = (unsigned long)arg3; \
+ _argvec[4] = (unsigned long)arg4; \
+ _argvec[5] = (unsigned long)arg5; \
__asm__ volatile( \
- VALGRIND_CFI_PROLOGUE \
- "subq $128,%%rsp\n\t" \
- "movq 40(%%rax), %%r8\n\t" \
- "movq 32(%%rax), %%rcx\n\t" \
- "movq 24(%%rax), %%rdx\n\t" \
- "movq 16(%%rax), %%rsi\n\t" \
- "movq 8(%%rax), %%rdi\n\t" \
- "movq (%%rax), %%rax\n\t" /* target->%rax */ \
- VALGRIND_CALL_NOREDIR_RAX \
- "addq $128,%%rsp\n\t" \
- VALGRIND_CFI_EPILOGUE \
- : /*out*/ "=a" (_res) \
- : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r15" \
+ VALGRIND_ALIGN_STACK \
+ "mr 11,%1\n\t" \
+ "lwz 3,4(11)\n\t" /* arg1->r3 */ \
+ "lwz 4,8(11)\n\t" \
+ "lwz 5,12(11)\n\t" \
+ "lwz 6,16(11)\n\t" /* arg4->r6 */ \
+ "lwz 7,20(11)\n\t" \
+ "lwz 11,0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ VALGRIND_RESTORE_STACK \
+ "mr %0,3" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
volatile unsigned long _argvec[7]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- _argvec[2] = (unsigned long)(arg2); \
- _argvec[3] = (unsigned long)(arg3); \
- _argvec[4] = (unsigned long)(arg4); \
- _argvec[5] = (unsigned long)(arg5); \
- _argvec[6] = (unsigned long)(arg6); \
+ _argvec[1] = (unsigned long)arg1; \
+ _argvec[2] = (unsigned long)arg2; \
+ _argvec[3] = (unsigned long)arg3; \
+ _argvec[4] = (unsigned long)arg4; \
+ _argvec[5] = (unsigned long)arg5; \
+ _argvec[6] = (unsigned long)arg6; \
__asm__ volatile( \
- VALGRIND_CFI_PROLOGUE \
- "subq $128,%%rsp\n\t" \
- "movq 48(%%rax), %%r9\n\t" \
- "movq 40(%%rax), %%r8\n\t" \
- "movq 32(%%rax), %%rcx\n\t" \
- "movq 24(%%rax), %%rdx\n\t" \
- "movq 16(%%rax), %%rsi\n\t" \
- "movq 8(%%rax), %%rdi\n\t" \
- "movq (%%rax), %%rax\n\t" /* target->%rax */ \
- VALGRIND_CALL_NOREDIR_RAX \
- "addq $128,%%rsp\n\t" \
- VALGRIND_CFI_EPILOGUE \
- : /*out*/ "=a" (_res) \
- : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r15" \
+ VALGRIND_ALIGN_STACK \
+ "mr 11,%1\n\t" \
+ "lwz 3,4(11)\n\t" /* arg1->r3 */ \
+ "lwz 4,8(11)\n\t" \
+ "lwz 5,12(11)\n\t" \
+ "lwz 6,16(11)\n\t" /* arg4->r6 */ \
+ "lwz 7,20(11)\n\t" \
+ "lwz 8,24(11)\n\t" \
+ "lwz 11,0(11)\n\t" /* target->r11 */ \
+ VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ VALGRIND_RESTORE_STACK \
+ "mr %0,3" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[0]) \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
volatile unsigned long _argvec[8]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- _argvec[2] = (unsigned long)(arg2); \
- _argvec[3] = (unsigned long)(arg3); \
- _argvec[4] = (unsigned long)(arg4); \
- _argvec[5] = (unsigned long)(arg5); \
- _argvec[6] = (unsigned long)(arg6); \
- _argvec[7] = (unsigned long)(arg7); \
- __asm__ volatile( \
- VALGRIND_CFI_PROLOGUE \
- "subq $136,%%rsp\n\t" \
- "pushq 56(%%rax)\n\t" \
- "movq 48(%%rax), %%r9\n\t" \
- "movq 40(%%rax), %%r8\n\t" \
- "movq 32(%%rax), %%rcx\n\t" \
- "movq 24(%%rax), %%rdx\n\t" \
- "movq 16(%%rax), %%rsi\n\t" \
- "movq 8(%%rax), %%rdi\n\t" \
- "movq (%%rax), %%rax\n\t" /* target->%rax */ \
- VALGRIND_CALL_NOREDIR_RAX \
- "addq $8, %%rsp\n" \
- "addq $136,%%rsp\n\t" \
- VALGRIND_CFI_EPILOGUE \
- : /*out*/ "=a" (_res) \
- : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r15" \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
- arg7,arg8) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[9]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- _argvec[2] = (unsigned long)(arg2); \
- _argvec[3] = (unsigned long)(arg3); \
- _argvec[4] = (unsigned long)(arg4); \
- _argvec[5] = (unsigned long)(arg5); \
- _argvec[6] = (unsigned long)(arg6); \
- _argvec[7] = (unsigned long)(arg7); \
- _argvec[8] = (unsigned long)(arg8); \
- __asm__ volatile( \
- VALGRIND_CFI_PROLOGUE \
- "subq $128,%%rsp\n\t" \
- "pushq 64(%%rax)\n\t" \
- "pushq 56(%%rax)\n\t" \
- "movq 48(%%rax), %%r9\n\t" \
- "movq 40(%%rax), %%r8\n\t" \
- "movq 32(%%rax), %%rcx\n\t" \
- "movq 24(%%rax), %%rdx\n\t" \
- "movq 16(%%rax), %%rsi\n\t" \
- "movq 8(%%rax), %%rdi\n\t" \
- "movq (%%rax), %%rax\n\t" /* target->%rax */ \
- VALGRIND_CALL_NOREDIR_RAX \
- "addq $16, %%rsp\n" \
- "addq $128,%%rsp\n\t" \
- VALGRIND_CFI_EPILOGUE \
- : /*out*/ "=a" (_res) \
- : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r15" \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
- arg7,arg8,arg9) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[10]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- _argvec[2] = (unsigned long)(arg2); \
- _argvec[3] = (unsigned long)(arg3); \
- _argvec[4] = (unsigned long)(arg4); \
- _argvec[5] = (unsigned long)(arg5); \
- _argvec[6] = (unsigned long)(arg6); \
- _argvec[7] = (unsigned long)(arg7); \
- _argvec[8] = (unsigned long)(arg8); \
- _argvec[9] = (unsigned long)(arg9); \
- __asm__ volatile( \
- VALGRIND_CFI_PROLOGUE \
- "subq $136,%%rsp\n\t" \
- "pushq 72(%%rax)\n\t" \
- "pushq 64(%%rax)\n\t" \
- "pushq 56(%%rax)\n\t" \
- "movq 48(%%rax), %%r9\n\t" \
- "movq 40(%%rax), %%r8\n\t" \
- "movq 32(%%rax), %%rcx\n\t" \
- "movq 24(%%rax), %%rdx\n\t" \
- "movq 16(%%rax), %%rsi\n\t" \
- "movq 8(%%rax), %%rdi\n\t" \
- "movq (%%rax), %%rax\n\t" /* target->%rax */ \
- VALGRIND_CALL_NOREDIR_RAX \
- "addq $24, %%rsp\n" \
- "addq $136,%%rsp\n\t" \
- VALGRIND_CFI_EPILOGUE \
- : /*out*/ "=a" (_res) \
- : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r15" \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
- arg7,arg8,arg9,arg10) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[11]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- _argvec[2] = (unsigned long)(arg2); \
- _argvec[3] = (unsigned long)(arg3); \
- _argvec[4] = (unsigned long)(arg4); \
- _argvec[5] = (unsigned long)(arg5); \
- _argvec[6] = (unsigned long)(arg6); \
- _argvec[7] = (unsigned long)(arg7); \
- _argvec[8] = (unsigned long)(arg8); \
- _argvec[9] = (unsigned long)(arg9); \
- _argvec[10] = (unsigned long)(arg10); \
- __asm__ volatile( \
- VALGRIND_CFI_PROLOGUE \
- "subq $128,%%rsp\n\t" \
- "pushq 80(%%rax)\n\t" \
- "pushq 72(%%rax)\n\t" \
- "pushq 64(%%rax)\n\t" \
- "pushq 56(%%rax)\n\t" \
- "movq 48(%%rax), %%r9\n\t" \
- "movq 40(%%rax), %%r8\n\t" \
- "movq 32(%%rax), %%rcx\n\t" \
- "movq 24(%%rax), %%rdx\n\t" \
- "movq 16(%%rax), %%rsi\n\t" \
- "movq 8(%%rax), %%rdi\n\t" \
- "movq (%%rax), %%rax\n\t" /* target->%rax */ \
- VALGRIND_CALL_NOREDIR_RAX \
- "addq $32, %%rsp\n" \
- "addq $128,%%rsp\n\t" \
- VALGRIND_CFI_EPILOGUE \
- : /*out*/ "=a" (_res) \
- : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r15" \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
- arg7,arg8,arg9,arg10,arg11) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[12]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- _argvec[2] = (unsigned long)(arg2); \
- _argvec[3] = (unsigned long)(arg3); \
- _argvec[4] = (unsigned long)(arg4); \
- _argvec[5] = (unsigned long)(arg5); \
- _argvec[6] = (unsigned long)(arg6); \
- _argvec[7] = (unsigned long)(arg7); \
- _argvec[8] = (unsigned long)(arg8); \
- _argvec[9] = (unsigned long)(arg9); \
- _argvec[10] = (unsigned long)(arg10); \
- _argvec[11] = (unsigned long)(arg11); \
- __asm__ volatile( \
- VALGRIND_CFI_PROLOGUE \
- "subq $136,%%rsp\n\t" \
- "pushq 88(%%rax)\n\t" \
- "pushq 80(%%rax)\n\t" \
- "pushq 72(%%rax)\n\t" \
- "pushq 64(%%rax)\n\t" \
- "pushq 56(%%rax)\n\t" \
- "movq 48(%%rax), %%r9\n\t" \
- "movq 40(%%rax), %%r8\n\t" \
- "movq 32(%%rax), %%rcx\n\t" \
- "movq 24(%%rax), %%rdx\n\t" \
- "movq 16(%%rax), %%rsi\n\t" \
- "movq 8(%%rax), %%rdi\n\t" \
- "movq (%%rax), %%rax\n\t" /* target->%rax */ \
- VALGRIND_CALL_NOREDIR_RAX \
- "addq $40, %%rsp\n" \
- "addq $136,%%rsp\n\t" \
- VALGRIND_CFI_EPILOGUE \
- : /*out*/ "=a" (_res) \
- : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r15" \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
- arg7,arg8,arg9,arg10,arg11,arg12) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[13]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)(arg1); \
- _argvec[2] = (unsigned long)(arg2); \
- _argvec[3] = (unsigned long)(arg3); \
- _argvec[4] = (unsigned long)(arg4); \
- _argvec[5] = (unsigned long)(arg5); \
- _argvec[6] = (unsigned long)(arg6); \
- _argvec[7] = (unsigned long)(arg7); \
- _argvec[8] = (unsigned long)(arg8); \
- _argvec[9] = (unsigned long)(arg9); \
- _argvec[10] = (unsigned long)(arg10); \
- _argvec[11] = (unsigned long)(arg11); \
- _argvec[12] = (unsigned long)(arg12); \
- __asm__ volatile( \
- VALGRIND_CFI_PROLOGUE \
- "subq $128,%%rsp\n\t" \
- "pushq 96(%%rax)\n\t" \
- "pushq 88(%%rax)\n\t" \
- "pushq 80(%%rax)\n\t" \
- "pushq 72(%%rax)\n\t" \
- "pushq 64(%%rax)\n\t" \
- "pushq 56(%%rax)\n\t" \
- "movq 48(%%rax), %%r9\n\t" \
- "movq 40(%%rax), %%r8\n\t" \
- "movq 32(%%rax), %%rcx\n\t" \
- "movq 24(%%rax), %%rdx\n\t" \
- "movq 16(%%rax), %%rsi\n\t" \
- "movq 8(%%rax), %%rdi\n\t" \
- "movq (%%rax), %%rax\n\t" /* target->%rax */ \
- VALGRIND_CALL_NOREDIR_RAX \
- "addq $48, %%rsp\n" \
- "addq $128,%%rsp\n\t" \
- VALGRIND_CFI_EPILOGUE \
- : /*out*/ "=a" (_res) \
- : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r15" \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#endif /* PLAT_amd64_linux || PLAT_amd64_darwin */
-
-/* ------------------------ ppc32-linux ------------------------ */
-
-#if defined(PLAT_ppc32_linux)
-
-/* This is useful for finding out about the on-stack stuff:
-
- extern int f9 ( int,int,int,int,int,int,int,int,int );
- extern int f10 ( int,int,int,int,int,int,int,int,int,int );
- extern int f11 ( int,int,int,int,int,int,int,int,int,int,int );
- extern int f12 ( int,int,int,int,int,int,int,int,int,int,int,int );
-
- int g9 ( void ) {
- return f9(11,22,33,44,55,66,77,88,99);
- }
- int g10 ( void ) {
- return f10(11,22,33,44,55,66,77,88,99,110);
- }
- int g11 ( void ) {
- return f11(11,22,33,44,55,66,77,88,99,110,121);
- }
- int g12 ( void ) {
- return f12(11,22,33,44,55,66,77,88,99,110,121,132);
- }
-*/
-
-/* ARGREGS: r3 r4 r5 r6 r7 r8 r9 r10 (the rest on stack somewhere) */
-
-/* These regs are trashed by the hidden call. */
-#define __CALLER_SAVED_REGS \
- "lr", "ctr", "xer", \
- "cr0", "cr1", "cr2", "cr3", "cr4", "cr5", "cr6", "cr7", \
- "r0", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10", \
- "r11", "r12", "r13"
-
-/* These CALL_FN_ macros assume that on ppc32-linux,
- sizeof(unsigned long) == 4. */
-
-#define CALL_FN_W_v(lval, orig) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[1]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- __asm__ volatile( \
- "mr 11,%1\n\t" \
- "lwz 11,0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- "mr %0,3" \
- : /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_W(lval, orig, arg1) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[2]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)arg1; \
+ _argvec[2] = (unsigned long)arg2; \
+ _argvec[3] = (unsigned long)arg3; \
+ _argvec[4] = (unsigned long)arg4; \
+ _argvec[5] = (unsigned long)arg5; \
+ _argvec[6] = (unsigned long)arg6; \
+ _argvec[7] = (unsigned long)arg7; \
__asm__ volatile( \
+ VALGRIND_ALIGN_STACK \
"mr 11,%1\n\t" \
"lwz 3,4(11)\n\t" /* arg1->r3 */ \
+ "lwz 4,8(11)\n\t" \
+ "lwz 5,12(11)\n\t" \
+ "lwz 6,16(11)\n\t" /* arg4->r6 */ \
+ "lwz 7,20(11)\n\t" \
+ "lwz 8,24(11)\n\t" \
+ "lwz 9,28(11)\n\t" \
"lwz 11,0(11)\n\t" /* target->r11 */ \
VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ VALGRIND_RESTORE_STACK \
"mr %0,3" \
: /*out*/ "=r" (_res) \
: /*in*/ "r" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
-#define CALL_FN_W_WW(lval, orig, arg1,arg2) \
+#define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7,arg8) \
do { \
volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[3]; \
+ volatile unsigned long _argvec[9]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)arg1; \
_argvec[2] = (unsigned long)arg2; \
+ _argvec[3] = (unsigned long)arg3; \
+ _argvec[4] = (unsigned long)arg4; \
+ _argvec[5] = (unsigned long)arg5; \
+ _argvec[6] = (unsigned long)arg6; \
+ _argvec[7] = (unsigned long)arg7; \
+ _argvec[8] = (unsigned long)arg8; \
__asm__ volatile( \
+ VALGRIND_ALIGN_STACK \
"mr 11,%1\n\t" \
"lwz 3,4(11)\n\t" /* arg1->r3 */ \
"lwz 4,8(11)\n\t" \
+ "lwz 5,12(11)\n\t" \
+ "lwz 6,16(11)\n\t" /* arg4->r6 */ \
+ "lwz 7,20(11)\n\t" \
+ "lwz 8,24(11)\n\t" \
+ "lwz 9,28(11)\n\t" \
+ "lwz 10,32(11)\n\t" /* arg8->r10 */ \
"lwz 11,0(11)\n\t" /* target->r11 */ \
VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ VALGRIND_RESTORE_STACK \
"mr %0,3" \
: /*out*/ "=r" (_res) \
: /*in*/ "r" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
-#define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3) \
+#define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7,arg8,arg9) \
do { \
volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[4]; \
+ volatile unsigned long _argvec[10]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)arg1; \
_argvec[2] = (unsigned long)arg2; \
_argvec[3] = (unsigned long)arg3; \
+ _argvec[4] = (unsigned long)arg4; \
+ _argvec[5] = (unsigned long)arg5; \
+ _argvec[6] = (unsigned long)arg6; \
+ _argvec[7] = (unsigned long)arg7; \
+ _argvec[8] = (unsigned long)arg8; \
+ _argvec[9] = (unsigned long)arg9; \
__asm__ volatile( \
+ VALGRIND_ALIGN_STACK \
"mr 11,%1\n\t" \
+ "addi 1,1,-16\n\t" \
+ /* arg9 */ \
+ "lwz 3,36(11)\n\t" \
+ "stw 3,8(1)\n\t" \
+ /* args1-8 */ \
"lwz 3,4(11)\n\t" /* arg1->r3 */ \
"lwz 4,8(11)\n\t" \
"lwz 5,12(11)\n\t" \
+ "lwz 6,16(11)\n\t" /* arg4->r6 */ \
+ "lwz 7,20(11)\n\t" \
+ "lwz 8,24(11)\n\t" \
+ "lwz 9,28(11)\n\t" \
+ "lwz 10,32(11)\n\t" /* arg8->r10 */ \
"lwz 11,0(11)\n\t" /* target->r11 */ \
VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ VALGRIND_RESTORE_STACK \
"mr %0,3" \
: /*out*/ "=r" (_res) \
: /*in*/ "r" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
-#define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \
+#define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7,arg8,arg9,arg10) \
do { \
volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[5]; \
+ volatile unsigned long _argvec[11]; \
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)arg1; \
_argvec[2] = (unsigned long)arg2; \
_argvec[3] = (unsigned long)arg3; \
_argvec[4] = (unsigned long)arg4; \
+ _argvec[5] = (unsigned long)arg5; \
+ _argvec[6] = (unsigned long)arg6; \
+ _argvec[7] = (unsigned long)arg7; \
+ _argvec[8] = (unsigned long)arg8; \
+ _argvec[9] = (unsigned long)arg9; \
+ _argvec[10] = (unsigned long)arg10; \
__asm__ volatile( \
+ VALGRIND_ALIGN_STACK \
"mr 11,%1\n\t" \
+ "addi 1,1,-16\n\t" \
+ /* arg10 */ \
+ "lwz 3,40(11)\n\t" \
+ "stw 3,12(1)\n\t" \
+ /* arg9 */ \
+ "lwz 3,36(11)\n\t" \
+ "stw 3,8(1)\n\t" \
+ /* args1-8 */ \
"lwz 3,4(11)\n\t" /* arg1->r3 */ \
"lwz 4,8(11)\n\t" \
"lwz 5,12(11)\n\t" \
"lwz 6,16(11)\n\t" /* arg4->r6 */ \
+ "lwz 7,20(11)\n\t" \
+ "lwz 8,24(11)\n\t" \
+ "lwz 9,28(11)\n\t" \
+ "lwz 10,32(11)\n\t" /* arg8->r10 */ \
"lwz 11,0(11)\n\t" /* target->r11 */ \
VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
+ VALGRIND_RESTORE_STACK \
"mr %0,3" \
: /*out*/ "=r" (_res) \
: /*in*/ "r" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[6]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)arg1; \
- _argvec[2] = (unsigned long)arg2; \
- _argvec[3] = (unsigned long)arg3; \
- _argvec[4] = (unsigned long)arg4; \
- _argvec[5] = (unsigned long)arg5; \
- __asm__ volatile( \
- "mr 11,%1\n\t" \
- "lwz 3,4(11)\n\t" /* arg1->r3 */ \
- "lwz 4,8(11)\n\t" \
- "lwz 5,12(11)\n\t" \
- "lwz 6,16(11)\n\t" /* arg4->r6 */ \
- "lwz 7,20(11)\n\t" \
- "lwz 11,0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- "mr %0,3" \
- : /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[7]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)arg1; \
- _argvec[2] = (unsigned long)arg2; \
- _argvec[3] = (unsigned long)arg3; \
- _argvec[4] = (unsigned long)arg4; \
- _argvec[5] = (unsigned long)arg5; \
- _argvec[6] = (unsigned long)arg6; \
- __asm__ volatile( \
- "mr 11,%1\n\t" \
- "lwz 3,4(11)\n\t" /* arg1->r3 */ \
- "lwz 4,8(11)\n\t" \
- "lwz 5,12(11)\n\t" \
- "lwz 6,16(11)\n\t" /* arg4->r6 */ \
- "lwz 7,20(11)\n\t" \
- "lwz 8,24(11)\n\t" \
- "lwz 11,0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- "mr %0,3" \
- : /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
- arg7) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[8]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)arg1; \
- _argvec[2] = (unsigned long)arg2; \
- _argvec[3] = (unsigned long)arg3; \
- _argvec[4] = (unsigned long)arg4; \
- _argvec[5] = (unsigned long)arg5; \
- _argvec[6] = (unsigned long)arg6; \
- _argvec[7] = (unsigned long)arg7; \
- __asm__ volatile( \
- "mr 11,%1\n\t" \
- "lwz 3,4(11)\n\t" /* arg1->r3 */ \
- "lwz 4,8(11)\n\t" \
- "lwz 5,12(11)\n\t" \
- "lwz 6,16(11)\n\t" /* arg4->r6 */ \
- "lwz 7,20(11)\n\t" \
- "lwz 8,24(11)\n\t" \
- "lwz 9,28(11)\n\t" \
- "lwz 11,0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- "mr %0,3" \
- : /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
- arg7,arg8) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[9]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)arg1; \
- _argvec[2] = (unsigned long)arg2; \
- _argvec[3] = (unsigned long)arg3; \
- _argvec[4] = (unsigned long)arg4; \
- _argvec[5] = (unsigned long)arg5; \
- _argvec[6] = (unsigned long)arg6; \
- _argvec[7] = (unsigned long)arg7; \
- _argvec[8] = (unsigned long)arg8; \
- __asm__ volatile( \
- "mr 11,%1\n\t" \
- "lwz 3,4(11)\n\t" /* arg1->r3 */ \
- "lwz 4,8(11)\n\t" \
- "lwz 5,12(11)\n\t" \
- "lwz 6,16(11)\n\t" /* arg4->r6 */ \
- "lwz 7,20(11)\n\t" \
- "lwz 8,24(11)\n\t" \
- "lwz 9,28(11)\n\t" \
- "lwz 10,32(11)\n\t" /* arg8->r10 */ \
- "lwz 11,0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- "mr %0,3" \
- : /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
- arg7,arg8,arg9) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[10]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)arg1; \
- _argvec[2] = (unsigned long)arg2; \
- _argvec[3] = (unsigned long)arg3; \
- _argvec[4] = (unsigned long)arg4; \
- _argvec[5] = (unsigned long)arg5; \
- _argvec[6] = (unsigned long)arg6; \
- _argvec[7] = (unsigned long)arg7; \
- _argvec[8] = (unsigned long)arg8; \
- _argvec[9] = (unsigned long)arg9; \
- __asm__ volatile( \
- "mr 11,%1\n\t" \
- "addi 1,1,-16\n\t" \
- /* arg9 */ \
- "lwz 3,36(11)\n\t" \
- "stw 3,8(1)\n\t" \
- /* args1-8 */ \
- "lwz 3,4(11)\n\t" /* arg1->r3 */ \
- "lwz 4,8(11)\n\t" \
- "lwz 5,12(11)\n\t" \
- "lwz 6,16(11)\n\t" /* arg4->r6 */ \
- "lwz 7,20(11)\n\t" \
- "lwz 8,24(11)\n\t" \
- "lwz 9,28(11)\n\t" \
- "lwz 10,32(11)\n\t" /* arg8->r10 */ \
- "lwz 11,0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- "addi 1,1,16\n\t" \
- "mr %0,3" \
- : /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
- ); \
- lval = (__typeof__(lval)) _res; \
- } while (0)
-
-#define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
- arg7,arg8,arg9,arg10) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[11]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)arg1; \
- _argvec[2] = (unsigned long)arg2; \
- _argvec[3] = (unsigned long)arg3; \
- _argvec[4] = (unsigned long)arg4; \
- _argvec[5] = (unsigned long)arg5; \
- _argvec[6] = (unsigned long)arg6; \
- _argvec[7] = (unsigned long)arg7; \
- _argvec[8] = (unsigned long)arg8; \
- _argvec[9] = (unsigned long)arg9; \
- _argvec[10] = (unsigned long)arg10; \
- __asm__ volatile( \
- "mr 11,%1\n\t" \
- "addi 1,1,-16\n\t" \
- /* arg10 */ \
- "lwz 3,40(11)\n\t" \
- "stw 3,12(1)\n\t" \
- /* arg9 */ \
- "lwz 3,36(11)\n\t" \
- "stw 3,8(1)\n\t" \
- /* args1-8 */ \
- "lwz 3,4(11)\n\t" /* arg1->r3 */ \
- "lwz 4,8(11)\n\t" \
- "lwz 5,12(11)\n\t" \
- "lwz 6,16(11)\n\t" /* arg4->r6 */ \
- "lwz 7,20(11)\n\t" \
- "lwz 8,24(11)\n\t" \
- "lwz 9,28(11)\n\t" \
- "lwz 10,32(11)\n\t" /* arg8->r10 */ \
- "lwz 11,0(11)\n\t" /* target->r11 */ \
- VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- "addi 1,1,16\n\t" \
- "mr %0,3" \
- : /*out*/ "=r" (_res) \
- : /*in*/ "r" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
_argvec[10] = (unsigned long)arg10; \
_argvec[11] = (unsigned long)arg11; \
__asm__ volatile( \
+ VALGRIND_ALIGN_STACK \
"mr 11,%1\n\t" \
"addi 1,1,-32\n\t" \
/* arg11 */ \
"lwz 10,32(11)\n\t" /* arg8->r10 */ \
"lwz 11,0(11)\n\t" /* target->r11 */ \
VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- "addi 1,1,32\n\t" \
+ VALGRIND_RESTORE_STACK \
"mr %0,3" \
: /*out*/ "=r" (_res) \
: /*in*/ "r" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
_argvec[11] = (unsigned long)arg11; \
_argvec[12] = (unsigned long)arg12; \
__asm__ volatile( \
+ VALGRIND_ALIGN_STACK \
"mr 11,%1\n\t" \
"addi 1,1,-32\n\t" \
/* arg12 */ \
"lwz 10,32(11)\n\t" /* arg8->r10 */ \
"lwz 11,0(11)\n\t" /* target->r11 */ \
VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
- "addi 1,1,32\n\t" \
+ VALGRIND_RESTORE_STACK \
"mr %0,3" \
: /*out*/ "=r" (_res) \
: /*in*/ "r" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
"r0", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10", \
"r11", "r12", "r13"
+/* Macros to save and align the stack before making a function
+ call and restore it afterwards as gcc may not keep the stack
+ pointer aligned if it doesn't realise calls are being made
+ to other functions. */
+
+#define VALGRIND_ALIGN_STACK \
+ "mr 28,1\n\t" \
+ "rldicr 1,1,0,59\n\t"
+#define VALGRIND_RESTORE_STACK \
+ "mr 1,28\n\t"
+
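+/* For illustration, the pair brackets each hidden call roughly like
+   this (a sketch of the expansion, not an additional macro):
+
+      mr     28,1         # save the caller's r1 (stack pointer) in r28
+      rldicr 1,1,0,59     # clear the low 4 bits: 16-byte alignment
+      ...                 # load args, branch-and-link to the target
+      mr     1,28         # restore the original stack pointer
+
+   Because r28 carries the saved stack pointer across the call, the
+   CALL_FN_ macros below also list "r28" as trashed. */
+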
/* These CALL_FN_ macros assume that on ppc64-linux, sizeof(unsigned
long) == 8. */
_argvec[1] = (unsigned long)_orig.r2; \
_argvec[2] = (unsigned long)_orig.nraddr; \
__asm__ volatile( \
+ VALGRIND_ALIGN_STACK \
"mr 11,%1\n\t" \
"std 2,-16(11)\n\t" /* save tocptr */ \
"ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
"mr 11,%1\n\t" \
"mr %0,3\n\t" \
- "ld 2,-16(11)" /* restore tocptr */ \
+ "ld 2,-16(11)\n\t" /* restore tocptr */ \
+ VALGRIND_RESTORE_STACK \
: /*out*/ "=r" (_res) \
: /*in*/ "r" (&_argvec[2]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
_argvec[2] = (unsigned long)_orig.nraddr; \
_argvec[2+1] = (unsigned long)arg1; \
__asm__ volatile( \
+ VALGRIND_ALIGN_STACK \
"mr 11,%1\n\t" \
"std 2,-16(11)\n\t" /* save tocptr */ \
"ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
"mr 11,%1\n\t" \
"mr %0,3\n\t" \
- "ld 2,-16(11)" /* restore tocptr */ \
+ "ld 2,-16(11)\n\t" /* restore tocptr */ \
+ VALGRIND_RESTORE_STACK \
: /*out*/ "=r" (_res) \
: /*in*/ "r" (&_argvec[2]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
_argvec[2+1] = (unsigned long)arg1; \
_argvec[2+2] = (unsigned long)arg2; \
__asm__ volatile( \
+ VALGRIND_ALIGN_STACK \
"mr 11,%1\n\t" \
"std 2,-16(11)\n\t" /* save tocptr */ \
"ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
"mr 11,%1\n\t" \
"mr %0,3\n\t" \
- "ld 2,-16(11)" /* restore tocptr */ \
+ "ld 2,-16(11)\n\t" /* restore tocptr */ \
+ VALGRIND_RESTORE_STACK \
: /*out*/ "=r" (_res) \
: /*in*/ "r" (&_argvec[2]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
_argvec[2+2] = (unsigned long)arg2; \
_argvec[2+3] = (unsigned long)arg3; \
__asm__ volatile( \
+ VALGRIND_ALIGN_STACK \
"mr 11,%1\n\t" \
"std 2,-16(11)\n\t" /* save tocptr */ \
"ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
"mr 11,%1\n\t" \
"mr %0,3\n\t" \
- "ld 2,-16(11)" /* restore tocptr */ \
+ "ld 2,-16(11)\n\t" /* restore tocptr */ \
+ VALGRIND_RESTORE_STACK \
: /*out*/ "=r" (_res) \
: /*in*/ "r" (&_argvec[2]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
_argvec[2+3] = (unsigned long)arg3; \
_argvec[2+4] = (unsigned long)arg4; \
__asm__ volatile( \
+ VALGRIND_ALIGN_STACK \
"mr 11,%1\n\t" \
"std 2,-16(11)\n\t" /* save tocptr */ \
"ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
"mr 11,%1\n\t" \
"mr %0,3\n\t" \
- "ld 2,-16(11)" /* restore tocptr */ \
+ "ld 2,-16(11)\n\t" /* restore tocptr */ \
+ VALGRIND_RESTORE_STACK \
: /*out*/ "=r" (_res) \
: /*in*/ "r" (&_argvec[2]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
_argvec[2+4] = (unsigned long)arg4; \
_argvec[2+5] = (unsigned long)arg5; \
__asm__ volatile( \
+ VALGRIND_ALIGN_STACK \
"mr 11,%1\n\t" \
"std 2,-16(11)\n\t" /* save tocptr */ \
"ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
"mr 11,%1\n\t" \
"mr %0,3\n\t" \
- "ld 2,-16(11)" /* restore tocptr */ \
+ "ld 2,-16(11)\n\t" /* restore tocptr */ \
+ VALGRIND_RESTORE_STACK \
: /*out*/ "=r" (_res) \
: /*in*/ "r" (&_argvec[2]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
_argvec[2+5] = (unsigned long)arg5; \
_argvec[2+6] = (unsigned long)arg6; \
__asm__ volatile( \
+ VALGRIND_ALIGN_STACK \
"mr 11,%1\n\t" \
"std 2,-16(11)\n\t" /* save tocptr */ \
"ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
"mr 11,%1\n\t" \
"mr %0,3\n\t" \
- "ld 2,-16(11)" /* restore tocptr */ \
+ "ld 2,-16(11)\n\t" /* restore tocptr */ \
+ VALGRIND_RESTORE_STACK \
: /*out*/ "=r" (_res) \
: /*in*/ "r" (&_argvec[2]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
_argvec[2+6] = (unsigned long)arg6; \
_argvec[2+7] = (unsigned long)arg7; \
__asm__ volatile( \
+ VALGRIND_ALIGN_STACK \
"mr 11,%1\n\t" \
"std 2,-16(11)\n\t" /* save tocptr */ \
"ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
"mr 11,%1\n\t" \
"mr %0,3\n\t" \
- "ld 2,-16(11)" /* restore tocptr */ \
+ "ld 2,-16(11)\n\t" /* restore tocptr */ \
+ VALGRIND_RESTORE_STACK \
: /*out*/ "=r" (_res) \
: /*in*/ "r" (&_argvec[2]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
_argvec[2+7] = (unsigned long)arg7; \
_argvec[2+8] = (unsigned long)arg8; \
__asm__ volatile( \
+ VALGRIND_ALIGN_STACK \
"mr 11,%1\n\t" \
"std 2,-16(11)\n\t" /* save tocptr */ \
"ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R11 \
"mr 11,%1\n\t" \
"mr %0,3\n\t" \
- "ld 2,-16(11)" /* restore tocptr */ \
+ "ld 2,-16(11)\n\t" /* restore tocptr */ \
+ VALGRIND_RESTORE_STACK \
: /*out*/ "=r" (_res) \
: /*in*/ "r" (&_argvec[2]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
_argvec[2+8] = (unsigned long)arg8; \
_argvec[2+9] = (unsigned long)arg9; \
__asm__ volatile( \
+ VALGRIND_ALIGN_STACK \
"mr 11,%1\n\t" \
"std 2,-16(11)\n\t" /* save tocptr */ \
"ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
"mr 11,%1\n\t" \
"mr %0,3\n\t" \
"ld 2,-16(11)\n\t" /* restore tocptr */ \
- "addi 1,1,128" /* restore frame */ \
+ VALGRIND_RESTORE_STACK \
: /*out*/ "=r" (_res) \
: /*in*/ "r" (&_argvec[2]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
_argvec[2+9] = (unsigned long)arg9; \
_argvec[2+10] = (unsigned long)arg10; \
__asm__ volatile( \
+ VALGRIND_ALIGN_STACK \
"mr 11,%1\n\t" \
"std 2,-16(11)\n\t" /* save tocptr */ \
"ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
"mr 11,%1\n\t" \
"mr %0,3\n\t" \
"ld 2,-16(11)\n\t" /* restore tocptr */ \
- "addi 1,1,128" /* restore frame */ \
+ VALGRIND_RESTORE_STACK \
: /*out*/ "=r" (_res) \
: /*in*/ "r" (&_argvec[2]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
_argvec[2+10] = (unsigned long)arg10; \
_argvec[2+11] = (unsigned long)arg11; \
__asm__ volatile( \
+ VALGRIND_ALIGN_STACK \
"mr 11,%1\n\t" \
"std 2,-16(11)\n\t" /* save tocptr */ \
"ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
"mr 11,%1\n\t" \
"mr %0,3\n\t" \
"ld 2,-16(11)\n\t" /* restore tocptr */ \
- "addi 1,1,144" /* restore frame */ \
+ VALGRIND_RESTORE_STACK \
: /*out*/ "=r" (_res) \
: /*in*/ "r" (&_argvec[2]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
_argvec[2+11] = (unsigned long)arg11; \
_argvec[2+12] = (unsigned long)arg12; \
__asm__ volatile( \
+ VALGRIND_ALIGN_STACK \
"mr 11,%1\n\t" \
"std 2,-16(11)\n\t" /* save tocptr */ \
"ld 2,-8(11)\n\t" /* use nraddr's tocptr */ \
"mr 11,%1\n\t" \
"mr %0,3\n\t" \
"ld 2,-16(11)\n\t" /* restore tocptr */ \
- "addi 1,1,144" /* restore frame */ \
+ VALGRIND_RESTORE_STACK \
: /*out*/ "=r" (_res) \
: /*in*/ "r" (&_argvec[2]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r28" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
/* These regs are trashed by the hidden call. */
#define __CALLER_SAVED_REGS "r0", "r1", "r2", "r3","r4","r14"
+/* Macros to save and align the stack before making a function
+ call and restore it afterwards as gcc may not keep the stack
+ pointer aligned if it doesn't realise calls are being made
+ to other functions. */
+
+/* This is a bit tricky. We store the original stack pointer in r10
+ as it is callee-saved. gcc doesn't allow the use of r11 for some
+ reason. Also, we can't directly "bic" the stack pointer in thumb
+ mode since r13 isn't an allowed register number in that context.
+ So use r4 as a temporary, since that is about to get trashed
+ anyway, just after each use of this macro. Side effect is we need
+ to be very careful about any future changes, since
+ VALGRIND_ALIGN_STACK simply assumes r4 is usable. */
+#define VALGRIND_ALIGN_STACK \
+ "mov r10, sp\n\t" \
+ "mov r4, sp\n\t" \
+ "bic r4, r4, #7\n\t" \
+ "mov sp, r4\n\t"
+#define VALGRIND_RESTORE_STACK \
+ "mov sp, r10\n\t"
+
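+/* Sketched out, VALGRIND_ALIGN_STACK parks the original sp in r10,
+   rounds sp down to an 8-byte boundary ("bic r4, r4, #7" clears the
+   low three bits, using r4 as the scratch register noted above), and
+   VALGRIND_RESTORE_STACK copies r10 back into sp afterwards.  This is
+   why "r10" is added to the clobber list of every CALL_FN_ macro
+   below. */
+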
/* These CALL_FN_ macros assume that on arm-linux, sizeof(unsigned
long) == 4. */
volatile unsigned long _res; \
_argvec[0] = (unsigned long)_orig.nraddr; \
__asm__ volatile( \
+ VALGRIND_ALIGN_STACK \
"ldr r4, [%1] \n\t" /* target->r4 */ \
VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \
+ VALGRIND_RESTORE_STACK \
"mov %0, r0\n" \
: /*out*/ "=r" (_res) \
: /*in*/ "0" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
_argvec[0] = (unsigned long)_orig.nraddr; \
_argvec[1] = (unsigned long)(arg1); \
__asm__ volatile( \
+ VALGRIND_ALIGN_STACK \
"ldr r0, [%1, #4] \n\t" \
"ldr r4, [%1] \n\t" /* target->r4 */ \
VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \
+ VALGRIND_RESTORE_STACK \
"mov %0, r0\n" \
: /*out*/ "=r" (_res) \
: /*in*/ "0" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
_argvec[1] = (unsigned long)(arg1); \
_argvec[2] = (unsigned long)(arg2); \
__asm__ volatile( \
+ VALGRIND_ALIGN_STACK \
"ldr r0, [%1, #4] \n\t" \
"ldr r1, [%1, #8] \n\t" \
"ldr r4, [%1] \n\t" /* target->r4 */ \
VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \
+ VALGRIND_RESTORE_STACK \
"mov %0, r0\n" \
: /*out*/ "=r" (_res) \
: /*in*/ "0" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
_argvec[2] = (unsigned long)(arg2); \
_argvec[3] = (unsigned long)(arg3); \
__asm__ volatile( \
+ VALGRIND_ALIGN_STACK \
"ldr r0, [%1, #4] \n\t" \
"ldr r1, [%1, #8] \n\t" \
"ldr r2, [%1, #12] \n\t" \
"ldr r4, [%1] \n\t" /* target->r4 */ \
VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \
+ VALGRIND_RESTORE_STACK \
"mov %0, r0\n" \
: /*out*/ "=r" (_res) \
: /*in*/ "0" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
_argvec[3] = (unsigned long)(arg3); \
_argvec[4] = (unsigned long)(arg4); \
__asm__ volatile( \
+ VALGRIND_ALIGN_STACK \
"ldr r0, [%1, #4] \n\t" \
"ldr r1, [%1, #8] \n\t" \
"ldr r2, [%1, #12] \n\t" \
"ldr r3, [%1, #16] \n\t" \
"ldr r4, [%1] \n\t" /* target->r4 */ \
VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \
+ VALGRIND_RESTORE_STACK \
"mov %0, r0" \
: /*out*/ "=r" (_res) \
: /*in*/ "0" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
_argvec[4] = (unsigned long)(arg4); \
_argvec[5] = (unsigned long)(arg5); \
__asm__ volatile( \
+ VALGRIND_ALIGN_STACK \
+ "sub sp, sp, #4 \n\t" \
"ldr r0, [%1, #20] \n\t" \
"push {r0} \n\t" \
"ldr r0, [%1, #4] \n\t" \
"ldr r3, [%1, #16] \n\t" \
"ldr r4, [%1] \n\t" /* target->r4 */ \
VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \
- "add sp, sp, #4 \n\t" \
+ VALGRIND_RESTORE_STACK \
"mov %0, r0" \
: /*out*/ "=r" (_res) \
: /*in*/ "0" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
_argvec[5] = (unsigned long)(arg5); \
_argvec[6] = (unsigned long)(arg6); \
__asm__ volatile( \
+ VALGRIND_ALIGN_STACK \
"ldr r0, [%1, #20] \n\t" \
"ldr r1, [%1, #24] \n\t" \
"push {r0, r1} \n\t" \
"ldr r3, [%1, #16] \n\t" \
"ldr r4, [%1] \n\t" /* target->r4 */ \
VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \
- "add sp, sp, #8 \n\t" \
+ VALGRIND_RESTORE_STACK \
"mov %0, r0" \
: /*out*/ "=r" (_res) \
: /*in*/ "0" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
_argvec[6] = (unsigned long)(arg6); \
_argvec[7] = (unsigned long)(arg7); \
__asm__ volatile( \
+ VALGRIND_ALIGN_STACK \
+ "sub sp, sp, #4 \n\t" \
"ldr r0, [%1, #20] \n\t" \
"ldr r1, [%1, #24] \n\t" \
"ldr r2, [%1, #28] \n\t" \
"ldr r3, [%1, #16] \n\t" \
"ldr r4, [%1] \n\t" /* target->r4 */ \
VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \
- "add sp, sp, #12 \n\t" \
+ VALGRIND_RESTORE_STACK \
"mov %0, r0" \
: /*out*/ "=r" (_res) \
: /*in*/ "0" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
_argvec[7] = (unsigned long)(arg7); \
_argvec[8] = (unsigned long)(arg8); \
__asm__ volatile( \
+ VALGRIND_ALIGN_STACK \
"ldr r0, [%1, #20] \n\t" \
"ldr r1, [%1, #24] \n\t" \
"ldr r2, [%1, #28] \n\t" \
"ldr r3, [%1, #16] \n\t" \
"ldr r4, [%1] \n\t" /* target->r4 */ \
VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \
- "add sp, sp, #16 \n\t" \
+ VALGRIND_RESTORE_STACK \
"mov %0, r0" \
: /*out*/ "=r" (_res) \
: /*in*/ "0" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
_argvec[8] = (unsigned long)(arg8); \
_argvec[9] = (unsigned long)(arg9); \
__asm__ volatile( \
+ VALGRIND_ALIGN_STACK \
+ "sub sp, sp, #4 \n\t" \
"ldr r0, [%1, #20] \n\t" \
"ldr r1, [%1, #24] \n\t" \
"ldr r2, [%1, #28] \n\t" \
"ldr r3, [%1, #16] \n\t" \
"ldr r4, [%1] \n\t" /* target->r4 */ \
VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \
- "add sp, sp, #20 \n\t" \
+ VALGRIND_RESTORE_STACK \
"mov %0, r0" \
: /*out*/ "=r" (_res) \
: /*in*/ "0" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
_argvec[9] = (unsigned long)(arg9); \
_argvec[10] = (unsigned long)(arg10); \
__asm__ volatile( \
+ VALGRIND_ALIGN_STACK \
"ldr r0, [%1, #40] \n\t" \
"push {r0} \n\t" \
"ldr r0, [%1, #20] \n\t" \
"ldr r3, [%1, #16] \n\t" \
"ldr r4, [%1] \n\t" /* target->r4 */ \
VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \
- "add sp, sp, #24 \n\t" \
+ VALGRIND_RESTORE_STACK \
"mov %0, r0" \
: /*out*/ "=r" (_res) \
: /*in*/ "0" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
_argvec[10] = (unsigned long)(arg10); \
_argvec[11] = (unsigned long)(arg11); \
__asm__ volatile( \
+ VALGRIND_ALIGN_STACK \
+ "sub sp, sp, #4 \n\t" \
"ldr r0, [%1, #40] \n\t" \
"ldr r1, [%1, #44] \n\t" \
"push {r0, r1} \n\t" \
"ldr r3, [%1, #16] \n\t" \
"ldr r4, [%1] \n\t" /* target->r4 */ \
VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \
- "add sp, sp, #28 \n\t" \
+ VALGRIND_RESTORE_STACK \
"mov %0, r0" \
: /*out*/ "=r" (_res) \
: /*in*/ "0" (&_argvec[0]) \
- : /*trash*/ "cc", "memory",__CALLER_SAVED_REGS \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
_argvec[11] = (unsigned long)(arg11); \
_argvec[12] = (unsigned long)(arg12); \
__asm__ volatile( \
+ VALGRIND_ALIGN_STACK \
"ldr r0, [%1, #40] \n\t" \
"ldr r1, [%1, #44] \n\t" \
"ldr r2, [%1, #48] \n\t" \
"ldr r3, [%1, #16] \n\t" \
"ldr r4, [%1] \n\t" /* target->r4 */ \
VALGRIND_BRANCH_AND_LINK_TO_NOREDIR_R4 \
- "add sp, sp, #32 \n\t" \
+ VALGRIND_RESTORE_STACK \
"mov %0, r0" \
: /*out*/ "=r" (_res) \
: /*in*/ "0" (&_argvec[0]) \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS, "r10" \
); \
lval = (__typeof__(lval)) _res; \
} while (0)
# define VALGRIND_CFI_EPILOGUE
#endif
-
-
+/* Nb: On s390 the stack pointer is properly aligned *at all times*
+ according to the s390 GCC maintainer. (The ABI specification is not
+ precise in this regard.) Therefore, VALGRIND_ALIGN_STACK and
+ VALGRIND_RESTORE_STACK are not defined here. */
/* These regs are trashed by the hidden call. Note that we overwrite
r14 in s390_irgen_noredir (VEX/priv/guest_s390_irgen.c) to give the
#define __CALLER_SAVED_REGS "0","1","2","3","4","5","14", \
"f0","f1","f2","f3","f4","f5","f6","f7"
+/* Nb: Although r11 is modified in the asm snippets below (inside
+ VALGRIND_CFI_PROLOGUE) it is not listed in the clobber section, for
+ two reasons:
+ (1) r11 is restored in VALGRIND_CFI_EPILOGUE, so effectively it is not
+ modified
+ (2) GCC will complain that r11 cannot appear inside a clobber section,
+ when compiled with -O -fno-omit-frame-pointer
+ */
#define CALL_FN_W_v(lval, orig) \
do { \
lval = (__typeof__(lval)) _res; \
} while (0)
-#define CALL_FN_W_10W(lval, orig, arg1, arg2, arg3, arg4, arg5, \
- arg6, arg7 ,arg8, arg9, arg10) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[11]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)arg1; \
- _argvec[2] = (unsigned long)arg2; \
- _argvec[3] = (unsigned long)arg3; \
- _argvec[4] = (unsigned long)arg4; \
- _argvec[5] = (unsigned long)arg5; \
- _argvec[6] = (unsigned long)arg6; \
- _argvec[7] = (unsigned long)arg7; \
- _argvec[8] = (unsigned long)arg8; \
- _argvec[9] = (unsigned long)arg9; \
- _argvec[10] = (unsigned long)arg10; \
- __asm__ volatile( \
- VALGRIND_CFI_PROLOGUE \
- "aghi 15,-200\n\t" \
- "lg 2, 8(1)\n\t" \
- "lg 3,16(1)\n\t" \
- "lg 4,24(1)\n\t" \
- "lg 5,32(1)\n\t" \
- "lg 6,40(1)\n\t" \
- "mvc 160(8,15), 48(1)\n\t" \
- "mvc 168(8,15), 56(1)\n\t" \
- "mvc 176(8,15), 64(1)\n\t" \
- "mvc 184(8,15), 72(1)\n\t" \
- "mvc 192(8,15), 80(1)\n\t" \
- "lg 1, 0(1)\n\t" \
- VALGRIND_CALL_NOREDIR_R1 \
- "lgr %0, 2\n\t" \
- "aghi 15,200\n\t" \
- VALGRIND_CFI_EPILOGUE \
- : /*out*/ "=d" (_res) \
- : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"6","7" \
- ); \
- lval = (__typeof__(lval)) _res; \
+#define CALL_FN_W_10W(lval, orig, arg1, arg2, arg3, arg4, arg5, \
+ arg6, arg7, arg8, arg9, arg10) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[11]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)arg1; \
+ _argvec[2] = (unsigned long)arg2; \
+ _argvec[3] = (unsigned long)arg3; \
+ _argvec[4] = (unsigned long)arg4; \
+ _argvec[5] = (unsigned long)arg5; \
+ _argvec[6] = (unsigned long)arg6; \
+ _argvec[7] = (unsigned long)arg7; \
+ _argvec[8] = (unsigned long)arg8; \
+ _argvec[9] = (unsigned long)arg9; \
+ _argvec[10] = (unsigned long)arg10; \
+ __asm__ volatile( \
+ VALGRIND_CFI_PROLOGUE \
+ "aghi 15,-200\n\t" \
+ "lg 2, 8(1)\n\t" \
+ "lg 3,16(1)\n\t" \
+ "lg 4,24(1)\n\t" \
+ "lg 5,32(1)\n\t" \
+ "lg 6,40(1)\n\t" \
+ "mvc 160(8,15), 48(1)\n\t" \
+ "mvc 168(8,15), 56(1)\n\t" \
+ "mvc 176(8,15), 64(1)\n\t" \
+ "mvc 184(8,15), 72(1)\n\t" \
+ "mvc 192(8,15), 80(1)\n\t" \
+ "lg 1, 0(1)\n\t" \
+ VALGRIND_CALL_NOREDIR_R1 \
+ "lgr %0, 2\n\t" \
+ "aghi 15,200\n\t" \
+ VALGRIND_CFI_EPILOGUE \
+ : /*out*/ "=d" (_res) \
+ : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"6","7" \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_11W(lval, orig, arg1, arg2, arg3, arg4, arg5, \
+ arg6, arg7, arg8, arg9, arg10, arg11) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[12]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)arg1; \
+ _argvec[2] = (unsigned long)arg2; \
+ _argvec[3] = (unsigned long)arg3; \
+ _argvec[4] = (unsigned long)arg4; \
+ _argvec[5] = (unsigned long)arg5; \
+ _argvec[6] = (unsigned long)arg6; \
+ _argvec[7] = (unsigned long)arg7; \
+ _argvec[8] = (unsigned long)arg8; \
+ _argvec[9] = (unsigned long)arg9; \
+ _argvec[10] = (unsigned long)arg10; \
+ _argvec[11] = (unsigned long)arg11; \
+ __asm__ volatile( \
+ VALGRIND_CFI_PROLOGUE \
+ "aghi 15,-208\n\t" \
+ "lg 2, 8(1)\n\t" \
+ "lg 3,16(1)\n\t" \
+ "lg 4,24(1)\n\t" \
+ "lg 5,32(1)\n\t" \
+ "lg 6,40(1)\n\t" \
+ "mvc 160(8,15), 48(1)\n\t" \
+ "mvc 168(8,15), 56(1)\n\t" \
+ "mvc 176(8,15), 64(1)\n\t" \
+ "mvc 184(8,15), 72(1)\n\t" \
+ "mvc 192(8,15), 80(1)\n\t" \
+ "mvc 200(8,15), 88(1)\n\t" \
+ "lg 1, 0(1)\n\t" \
+ VALGRIND_CALL_NOREDIR_R1 \
+ "lgr %0, 2\n\t" \
+ "aghi 15,208\n\t" \
+ VALGRIND_CFI_EPILOGUE \
+ : /*out*/ "=d" (_res) \
+ : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"6","7" \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_12W(lval, orig, arg1, arg2, arg3, arg4, arg5, \
+ arg6, arg7, arg8, arg9, arg10, arg11, arg12) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[13]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)arg1; \
+ _argvec[2] = (unsigned long)arg2; \
+ _argvec[3] = (unsigned long)arg3; \
+ _argvec[4] = (unsigned long)arg4; \
+ _argvec[5] = (unsigned long)arg5; \
+ _argvec[6] = (unsigned long)arg6; \
+ _argvec[7] = (unsigned long)arg7; \
+ _argvec[8] = (unsigned long)arg8; \
+ _argvec[9] = (unsigned long)arg9; \
+ _argvec[10] = (unsigned long)arg10; \
+ _argvec[11] = (unsigned long)arg11; \
+ _argvec[12] = (unsigned long)arg12; \
+ __asm__ volatile( \
+ VALGRIND_CFI_PROLOGUE \
+ "aghi 15,-216\n\t" \
+ "lg 2, 8(1)\n\t" \
+ "lg 3,16(1)\n\t" \
+ "lg 4,24(1)\n\t" \
+ "lg 5,32(1)\n\t" \
+ "lg 6,40(1)\n\t" \
+ "mvc 160(8,15), 48(1)\n\t" \
+ "mvc 168(8,15), 56(1)\n\t" \
+ "mvc 176(8,15), 64(1)\n\t" \
+ "mvc 184(8,15), 72(1)\n\t" \
+ "mvc 192(8,15), 80(1)\n\t" \
+ "mvc 200(8,15), 88(1)\n\t" \
+ "mvc 208(8,15), 96(1)\n\t" \
+ "lg 1, 0(1)\n\t" \
+ VALGRIND_CALL_NOREDIR_R1 \
+ "lgr %0, 2\n\t" \
+ "aghi 15,216\n\t" \
+ VALGRIND_CFI_EPILOGUE \
+ : /*out*/ "=d" (_res) \
+ : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
+ : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"6","7" \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+
+#endif /* PLAT_s390x_linux */
+
+/* ------------------------- mips32-linux ----------------------- */
+
+#if defined(PLAT_mips32_linux)
+
+/* These regs are trashed by the hidden call. */
+#define __CALLER_SAVED_REGS "$2", "$3", "$4", "$5", "$6", \
+"$7", "$8", "$9", "$10", "$11", "$12", "$13", "$14", "$15", "$24", \
+"$25", "$31"
+
+/* These CALL_FN_ macros assume that on mips-linux, sizeof(unsigned
+ long) == 4. */
+
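+/* All the mips32 variants below follow the same shape; as a summary
+   of the code that follows (not a separate code path):
+
+      subu $29, $29, 8        # open a small frame
+      sw   $28, 0($29)        # save gp
+      sw   $31, 4($29)        # save ra
+      subu $29, $29, N        # reserve the o32 outgoing-argument area
+      ...                     # args 1-4 into $4-$7, extras spilled
+      lw   $25, 0(%1)         # target address into t9
+      VALGRIND_CALL_NOREDIR_T9
+      ...                     # pop the frame, restore gp/ra, result in $2
+
+   The call goes through $25 (t9) because o32 position-independent
+   callees typically recompute $gp from t9 in their prologue. */
+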
+#define CALL_FN_W_v(lval, orig) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[1]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ __asm__ volatile( \
+ "subu $29, $29, 8 \n\t" \
+ "sw $28, 0($29) \n\t" \
+ "sw $31, 4($29) \n\t" \
+ "subu $29, $29, 16 \n\t" \
+ "lw $25, 0(%1) \n\t" /* target->t9 */ \
+ VALGRIND_CALL_NOREDIR_T9 \
+ "addu $29, $29, 16\n\t" \
+ "lw $28, 0($29) \n\t" \
+ "lw $31, 4($29) \n\t" \
+ "addu $29, $29, 8 \n\t" \
+ "move %0, $2\n" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "0" (&_argvec[0]) \
+ : /*trash*/ "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_W(lval, orig, arg1) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[2]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ __asm__ volatile( \
+ "subu $29, $29, 8 \n\t" \
+ "sw $28, 0($29) \n\t" \
+ "sw $31, 4($29) \n\t" \
+ "subu $29, $29, 16 \n\t" \
+ "lw $4, 4(%1) \n\t" /* arg1*/ \
+ "lw $25, 0(%1) \n\t" /* target->t9 */ \
+ VALGRIND_CALL_NOREDIR_T9 \
+ "addu $29, $29, 16 \n\t" \
+ "lw $28, 0($29) \n\t" \
+ "lw $31, 4($29) \n\t" \
+ "addu $29, $29, 8 \n\t" \
+ "move %0, $2\n" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "0" (&_argvec[0]) \
+ : /*trash*/ "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_WW(lval, orig, arg1,arg2) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ __asm__ volatile( \
+ "subu $29, $29, 8 \n\t" \
+ "sw $28, 0($29) \n\t" \
+ "sw $31, 4($29) \n\t" \
+ "subu $29, $29, 16 \n\t" \
+ "lw $4, 4(%1) \n\t" \
+ "lw $5, 8(%1) \n\t" \
+ "lw $25, 0(%1) \n\t" /* target->t9 */ \
+ VALGRIND_CALL_NOREDIR_T9 \
+ "addu $29, $29, 16 \n\t" \
+ "lw $28, 0($29) \n\t" \
+ "lw $31, 4($29) \n\t" \
+ "addu $29, $29, 8 \n\t" \
+ "move %0, $2\n" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "0" (&_argvec[0]) \
+ : /*trash*/ "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[4]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ __asm__ volatile( \
+ "subu $29, $29, 8 \n\t" \
+ "sw $28, 0($29) \n\t" \
+ "sw $31, 4($29) \n\t" \
+ "subu $29, $29, 16 \n\t" \
+ "lw $4, 4(%1) \n\t" \
+ "lw $5, 8(%1) \n\t" \
+ "lw $6, 12(%1) \n\t" \
+ "lw $25, 0(%1) \n\t" /* target->t9 */ \
+ VALGRIND_CALL_NOREDIR_T9 \
+ "addu $29, $29, 16 \n\t" \
+ "lw $28, 0($29) \n\t" \
+ "lw $31, 4($29) \n\t" \
+ "addu $29, $29, 8 \n\t" \
+ "move %0, $2\n" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "0" (&_argvec[0]) \
+ : /*trash*/ "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[5]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ __asm__ volatile( \
+ "subu $29, $29, 8 \n\t" \
+ "sw $28, 0($29) \n\t" \
+ "sw $31, 4($29) \n\t" \
+ "subu $29, $29, 16 \n\t" \
+ "lw $4, 4(%1) \n\t" \
+ "lw $5, 8(%1) \n\t" \
+ "lw $6, 12(%1) \n\t" \
+ "lw $7, 16(%1) \n\t" \
+ "lw $25, 0(%1) \n\t" /* target->t9 */ \
+ VALGRIND_CALL_NOREDIR_T9 \
+ "addu $29, $29, 16 \n\t" \
+ "lw $28, 0($29) \n\t" \
+ "lw $31, 4($29) \n\t" \
+ "addu $29, $29, 8 \n\t" \
+ "move %0, $2\n" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "0" (&_argvec[0]) \
+ : /*trash*/ "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[6]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ __asm__ volatile( \
+ "subu $29, $29, 8 \n\t" \
+ "sw $28, 0($29) \n\t" \
+ "sw $31, 4($29) \n\t" \
+ "lw $4, 20(%1) \n\t" \
+ "subu $29, $29, 24\n\t" \
+ "sw $4, 16($29) \n\t" \
+ "lw $4, 4(%1) \n\t" \
+ "lw $5, 8(%1) \n\t" \
+ "lw $6, 12(%1) \n\t" \
+ "lw $7, 16(%1) \n\t" \
+ "lw $25, 0(%1) \n\t" /* target->t9 */ \
+ VALGRIND_CALL_NOREDIR_T9 \
+ "addu $29, $29, 24 \n\t" \
+ "lw $28, 0($29) \n\t" \
+ "lw $31, 4($29) \n\t" \
+ "addu $29, $29, 8 \n\t" \
+ "move %0, $2\n" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "0" (&_argvec[0]) \
+ : /*trash*/ "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+#define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[7]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ _argvec[6] = (unsigned long)(arg6); \
+ __asm__ volatile( \
+ "subu $29, $29, 8 \n\t" \
+ "sw $28, 0($29) \n\t" \
+ "sw $31, 4($29) \n\t" \
+ "lw $4, 20(%1) \n\t" \
+ "subu $29, $29, 32\n\t" \
+ "sw $4, 16($29) \n\t" \
+ "lw $4, 24(%1) \n\t" \
+ "nop\n\t" \
+ "sw $4, 20($29) \n\t" \
+ "lw $4, 4(%1) \n\t" \
+ "lw $5, 8(%1) \n\t" \
+ "lw $6, 12(%1) \n\t" \
+ "lw $7, 16(%1) \n\t" \
+ "lw $25, 0(%1) \n\t" /* target->t9 */ \
+ VALGRIND_CALL_NOREDIR_T9 \
+ "addu $29, $29, 32 \n\t" \
+ "lw $28, 0($29) \n\t" \
+ "lw $31, 4($29) \n\t" \
+ "addu $29, $29, 8 \n\t" \
+ "move %0, $2\n" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "0" (&_argvec[0]) \
+ : /*trash*/ "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[8]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ _argvec[6] = (unsigned long)(arg6); \
+ _argvec[7] = (unsigned long)(arg7); \
+ __asm__ volatile( \
+ "subu $29, $29, 8 \n\t" \
+ "sw $28, 0($29) \n\t" \
+ "sw $31, 4($29) \n\t" \
+ "lw $4, 20(%1) \n\t" \
+ "subu $29, $29, 32\n\t" \
+ "sw $4, 16($29) \n\t" \
+ "lw $4, 24(%1) \n\t" \
+ "sw $4, 20($29) \n\t" \
+ "lw $4, 28(%1) \n\t" \
+ "sw $4, 24($29) \n\t" \
+ "lw $4, 4(%1) \n\t" \
+ "lw $5, 8(%1) \n\t" \
+ "lw $6, 12(%1) \n\t" \
+ "lw $7, 16(%1) \n\t" \
+ "lw $25, 0(%1) \n\t" /* target->t9 */ \
+ VALGRIND_CALL_NOREDIR_T9 \
+ "addu $29, $29, 32 \n\t" \
+ "lw $28, 0($29) \n\t" \
+ "lw $31, 4($29) \n\t" \
+ "addu $29, $29, 8 \n\t" \
+ "move %0, $2\n" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "0" (&_argvec[0]) \
+ : /*trash*/ "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7,arg8) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[9]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ _argvec[6] = (unsigned long)(arg6); \
+ _argvec[7] = (unsigned long)(arg7); \
+ _argvec[8] = (unsigned long)(arg8); \
+ __asm__ volatile( \
+ "subu $29, $29, 8 \n\t" \
+ "sw $28, 0($29) \n\t" \
+ "sw $31, 4($29) \n\t" \
+ "lw $4, 20(%1) \n\t" \
+ "subu $29, $29, 40\n\t" \
+ "sw $4, 16($29) \n\t" \
+ "lw $4, 24(%1) \n\t" \
+ "sw $4, 20($29) \n\t" \
+ "lw $4, 28(%1) \n\t" \
+ "sw $4, 24($29) \n\t" \
+ "lw $4, 32(%1) \n\t" \
+ "sw $4, 28($29) \n\t" \
+ "lw $4, 4(%1) \n\t" \
+ "lw $5, 8(%1) \n\t" \
+ "lw $6, 12(%1) \n\t" \
+ "lw $7, 16(%1) \n\t" \
+ "lw $25, 0(%1) \n\t" /* target->t9 */ \
+ VALGRIND_CALL_NOREDIR_T9 \
+ "addu $29, $29, 40 \n\t" \
+ "lw $28, 0($29) \n\t" \
+ "lw $31, 4($29) \n\t" \
+ "addu $29, $29, 8 \n\t" \
+ "move %0, $2\n" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "0" (&_argvec[0]) \
+ : /*trash*/ "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7,arg8,arg9) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[10]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ _argvec[6] = (unsigned long)(arg6); \
+ _argvec[7] = (unsigned long)(arg7); \
+ _argvec[8] = (unsigned long)(arg8); \
+ _argvec[9] = (unsigned long)(arg9); \
+ __asm__ volatile( \
+ "subu $29, $29, 8 \n\t" \
+ "sw $28, 0($29) \n\t" \
+ "sw $31, 4($29) \n\t" \
+ "lw $4, 20(%1) \n\t" \
+ "subu $29, $29, 40\n\t" \
+ "sw $4, 16($29) \n\t" \
+ "lw $4, 24(%1) \n\t" \
+ "sw $4, 20($29) \n\t" \
+ "lw $4, 28(%1) \n\t" \
+ "sw $4, 24($29) \n\t" \
+ "lw $4, 32(%1) \n\t" \
+ "sw $4, 28($29) \n\t" \
+ "lw $4, 36(%1) \n\t" \
+ "sw $4, 32($29) \n\t" \
+ "lw $4, 4(%1) \n\t" \
+ "lw $5, 8(%1) \n\t" \
+ "lw $6, 12(%1) \n\t" \
+ "lw $7, 16(%1) \n\t" \
+ "lw $25, 0(%1) \n\t" /* target->t9 */ \
+ VALGRIND_CALL_NOREDIR_T9 \
+ "addu $29, $29, 40 \n\t" \
+ "lw $28, 0($29) \n\t" \
+ "lw $31, 4($29) \n\t" \
+ "addu $29, $29, 8 \n\t" \
+ "move %0, $2\n" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "0" (&_argvec[0]) \
+ : /*trash*/ "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7,arg8,arg9,arg10) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[11]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ _argvec[6] = (unsigned long)(arg6); \
+ _argvec[7] = (unsigned long)(arg7); \
+ _argvec[8] = (unsigned long)(arg8); \
+ _argvec[9] = (unsigned long)(arg9); \
+ _argvec[10] = (unsigned long)(arg10); \
+ __asm__ volatile( \
+ "subu $29, $29, 8 \n\t" \
+ "sw $28, 0($29) \n\t" \
+ "sw $31, 4($29) \n\t" \
+ "lw $4, 20(%1) \n\t" \
+ "subu $29, $29, 48\n\t" \
+ "sw $4, 16($29) \n\t" \
+ "lw $4, 24(%1) \n\t" \
+ "sw $4, 20($29) \n\t" \
+ "lw $4, 28(%1) \n\t" \
+ "sw $4, 24($29) \n\t" \
+ "lw $4, 32(%1) \n\t" \
+ "sw $4, 28($29) \n\t" \
+ "lw $4, 36(%1) \n\t" \
+ "sw $4, 32($29) \n\t" \
+ "lw $4, 40(%1) \n\t" \
+ "sw $4, 36($29) \n\t" \
+ "lw $4, 4(%1) \n\t" \
+ "lw $5, 8(%1) \n\t" \
+ "lw $6, 12(%1) \n\t" \
+ "lw $7, 16(%1) \n\t" \
+ "lw $25, 0(%1) \n\t" /* target->t9 */ \
+ VALGRIND_CALL_NOREDIR_T9 \
+ "addu $29, $29, 48 \n\t" \
+ "lw $28, 0($29) \n\t" \
+ "lw $31, 4($29) \n\t" \
+ "addu $29, $29, 8 \n\t" \
+ "move %0, $2\n" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "0" (&_argvec[0]) \
+ : /*trash*/ "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5, \
+ arg6,arg7,arg8,arg9,arg10, \
+ arg11) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[12]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ _argvec[6] = (unsigned long)(arg6); \
+ _argvec[7] = (unsigned long)(arg7); \
+ _argvec[8] = (unsigned long)(arg8); \
+ _argvec[9] = (unsigned long)(arg9); \
+ _argvec[10] = (unsigned long)(arg10); \
+ _argvec[11] = (unsigned long)(arg11); \
+ __asm__ volatile( \
+ "subu $29, $29, 8 \n\t" \
+ "sw $28, 0($29) \n\t" \
+ "sw $31, 4($29) \n\t" \
+ "lw $4, 20(%1) \n\t" \
+ "subu $29, $29, 48\n\t" \
+ "sw $4, 16($29) \n\t" \
+ "lw $4, 24(%1) \n\t" \
+ "sw $4, 20($29) \n\t" \
+ "lw $4, 28(%1) \n\t" \
+ "sw $4, 24($29) \n\t" \
+ "lw $4, 32(%1) \n\t" \
+ "sw $4, 28($29) \n\t" \
+ "lw $4, 36(%1) \n\t" \
+ "sw $4, 32($29) \n\t" \
+ "lw $4, 40(%1) \n\t" \
+ "sw $4, 36($29) \n\t" \
+ "lw $4, 44(%1) \n\t" \
+ "sw $4, 40($29) \n\t" \
+ "lw $4, 4(%1) \n\t" \
+ "lw $5, 8(%1) \n\t" \
+ "lw $6, 12(%1) \n\t" \
+ "lw $7, 16(%1) \n\t" \
+ "lw $25, 0(%1) \n\t" /* target->t9 */ \
+ VALGRIND_CALL_NOREDIR_T9 \
+ "addu $29, $29, 48 \n\t" \
+ "lw $28, 0($29) \n\t" \
+ "lw $31, 4($29) \n\t" \
+ "addu $29, $29, 8 \n\t" \
+ "move %0, $2\n" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "0" (&_argvec[0]) \
+ : /*trash*/ "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5, \
+ arg6,arg7,arg8,arg9,arg10, \
+ arg11,arg12) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[13]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ _argvec[6] = (unsigned long)(arg6); \
+ _argvec[7] = (unsigned long)(arg7); \
+ _argvec[8] = (unsigned long)(arg8); \
+ _argvec[9] = (unsigned long)(arg9); \
+ _argvec[10] = (unsigned long)(arg10); \
+ _argvec[11] = (unsigned long)(arg11); \
+ _argvec[12] = (unsigned long)(arg12); \
+ __asm__ volatile( \
+ "subu $29, $29, 8 \n\t" \
+ "sw $28, 0($29) \n\t" \
+ "sw $31, 4($29) \n\t" \
+ "lw $4, 20(%1) \n\t" \
+ "subu $29, $29, 56\n\t" \
+ "sw $4, 16($29) \n\t" \
+ "lw $4, 24(%1) \n\t" \
+ "sw $4, 20($29) \n\t" \
+ "lw $4, 28(%1) \n\t" \
+ "sw $4, 24($29) \n\t" \
+ "lw $4, 32(%1) \n\t" \
+ "sw $4, 28($29) \n\t" \
+ "lw $4, 36(%1) \n\t" \
+ "sw $4, 32($29) \n\t" \
+ "lw $4, 40(%1) \n\t" \
+ "sw $4, 36($29) \n\t" \
+ "lw $4, 44(%1) \n\t" \
+ "sw $4, 40($29) \n\t" \
+ "lw $4, 48(%1) \n\t" \
+ "sw $4, 44($29) \n\t" \
+ "lw $4, 4(%1) \n\t" \
+ "lw $5, 8(%1) \n\t" \
+ "lw $6, 12(%1) \n\t" \
+ "lw $7, 16(%1) \n\t" \
+ "lw $25, 0(%1) \n\t" /* target->t9 */ \
+ VALGRIND_CALL_NOREDIR_T9 \
+ "addu $29, $29, 56 \n\t" \
+ "lw $28, 0($29) \n\t" \
+ "lw $31, 4($29) \n\t" \
+ "addu $29, $29, 8 \n\t" \
+ "move %0, $2\n" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[0]) \
+ : /*trash*/ "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#endif /* PLAT_mips32_linux */
+
+/* ------------------------- mips64-linux ------------------------- */
+
+#if defined(PLAT_mips64_linux)
+
+/* These regs are trashed by the hidden call. */
+#define __CALLER_SAVED_REGS "$2", "$3", "$4", "$5", "$6", \
+"$7", "$8", "$9", "$10", "$11", "$12", "$13", "$14", "$15", "$24", \
+"$25", "$31"
+
+/* These CALL_FN_ macros assume that on mips64-linux, sizeof(unsigned
+   long) == 8. */
+
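+/* Relative to the mips32 versions above, these follow the n64 calling
+   convention visible in the loads below: up to eight 64-bit arguments
+   are passed in $4-$11, and arguments nine onwards are spilled to a
+   small frame opened with dsubu/sd and closed with daddu.  The target
+   address again travels through $25 (t9) via
+   VALGRIND_CALL_NOREDIR_T9. */
+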
+#define CALL_FN_W_v(lval, orig) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[1]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ __asm__ volatile( \
+ "ld $25, 0(%1)\n\t" /* target->t9 */ \
+ VALGRIND_CALL_NOREDIR_T9 \
+ "move %0, $2\n" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "0" (&_argvec[0]) \
+ : /*trash*/ "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_W(lval, orig, arg1) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[2]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ __asm__ volatile( \
+ "ld $4, 8(%1)\n\t" /* arg1*/ \
+ "ld $25, 0(%1)\n\t" /* target->t9 */ \
+ VALGRIND_CALL_NOREDIR_T9 \
+ "move %0, $2\n" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[0]) \
+ : /*trash*/ "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_WW(lval, orig, arg1,arg2) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[3]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ __asm__ volatile( \
+ "ld $4, 8(%1)\n\t" \
+ "ld $5, 16(%1)\n\t" \
+ "ld $25, 0(%1)\n\t" /* target->t9 */ \
+ VALGRIND_CALL_NOREDIR_T9 \
+ "move %0, $2\n" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[0]) \
+ : /*trash*/ "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_WWW(lval, orig, arg1,arg2,arg3) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[4]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ __asm__ volatile( \
+ "ld $4, 8(%1)\n\t" \
+ "ld $5, 16(%1)\n\t" \
+ "ld $6, 24(%1)\n\t" \
+ "ld $25, 0(%1)\n\t" /* target->t9 */ \
+ VALGRIND_CALL_NOREDIR_T9 \
+ "move %0, $2\n" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[0]) \
+ : /*trash*/ "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_WWWW(lval, orig, arg1,arg2,arg3,arg4) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[5]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ __asm__ volatile( \
+ "ld $4, 8(%1)\n\t" \
+ "ld $5, 16(%1)\n\t" \
+ "ld $6, 24(%1)\n\t" \
+ "ld $7, 32(%1)\n\t" \
+ "ld $25, 0(%1)\n\t" /* target->t9 */ \
+ VALGRIND_CALL_NOREDIR_T9 \
+ "move %0, $2\n" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[0]) \
+ : /*trash*/ "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_5W(lval, orig, arg1,arg2,arg3,arg4,arg5) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[6]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ __asm__ volatile( \
+ "ld $4, 8(%1)\n\t" \
+ "ld $5, 16(%1)\n\t" \
+ "ld $6, 24(%1)\n\t" \
+ "ld $7, 32(%1)\n\t" \
+ "ld $8, 40(%1)\n\t" \
+ "ld $25, 0(%1)\n\t" /* target->t9 */ \
+ VALGRIND_CALL_NOREDIR_T9 \
+ "move %0, $2\n" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[0]) \
+ : /*trash*/ "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_6W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[7]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ _argvec[6] = (unsigned long)(arg6); \
+ __asm__ volatile( \
+ "ld $4, 8(%1)\n\t" \
+ "ld $5, 16(%1)\n\t" \
+ "ld $6, 24(%1)\n\t" \
+ "ld $7, 32(%1)\n\t" \
+ "ld $8, 40(%1)\n\t" \
+ "ld $9, 48(%1)\n\t" \
+ "ld $25, 0(%1)\n\t" /* target->t9 */ \
+ VALGRIND_CALL_NOREDIR_T9 \
+ "move %0, $2\n" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[0]) \
+ : /*trash*/ "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_7W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[8]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ _argvec[6] = (unsigned long)(arg6); \
+ _argvec[7] = (unsigned long)(arg7); \
+ __asm__ volatile( \
+ "ld $4, 8(%1)\n\t" \
+ "ld $5, 16(%1)\n\t" \
+ "ld $6, 24(%1)\n\t" \
+ "ld $7, 32(%1)\n\t" \
+ "ld $8, 40(%1)\n\t" \
+ "ld $9, 48(%1)\n\t" \
+ "ld $10, 56(%1)\n\t" \
+ "ld $25, 0(%1) \n\t" /* target->t9 */ \
+ VALGRIND_CALL_NOREDIR_T9 \
+ "move %0, $2\n" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[0]) \
+ : /*trash*/ "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_8W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7,arg8) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[9]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ _argvec[6] = (unsigned long)(arg6); \
+ _argvec[7] = (unsigned long)(arg7); \
+ _argvec[8] = (unsigned long)(arg8); \
+ __asm__ volatile( \
+ "ld $4, 8(%1)\n\t" \
+ "ld $5, 16(%1)\n\t" \
+ "ld $6, 24(%1)\n\t" \
+ "ld $7, 32(%1)\n\t" \
+ "ld $8, 40(%1)\n\t" \
+ "ld $9, 48(%1)\n\t" \
+ "ld $10, 56(%1)\n\t" \
+ "ld $11, 64(%1)\n\t" \
+ "ld $25, 0(%1) \n\t" /* target->t9 */ \
+ VALGRIND_CALL_NOREDIR_T9 \
+ "move %0, $2\n" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[0]) \
+ : /*trash*/ "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
} while (0)
-#define CALL_FN_W_11W(lval, orig, arg1, arg2, arg3, arg4, arg5, \
- arg6, arg7 ,arg8, arg9, arg10, arg11) \
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[12]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)arg1; \
- _argvec[2] = (unsigned long)arg2; \
- _argvec[3] = (unsigned long)arg3; \
- _argvec[4] = (unsigned long)arg4; \
- _argvec[5] = (unsigned long)arg5; \
- _argvec[6] = (unsigned long)arg6; \
- _argvec[7] = (unsigned long)arg7; \
- _argvec[8] = (unsigned long)arg8; \
- _argvec[9] = (unsigned long)arg9; \
- _argvec[10] = (unsigned long)arg10; \
- _argvec[11] = (unsigned long)arg11; \
- __asm__ volatile( \
- VALGRIND_CFI_PROLOGUE \
- "aghi 15,-208\n\t" \
- "lg 2, 8(1)\n\t" \
- "lg 3,16(1)\n\t" \
- "lg 4,24(1)\n\t" \
- "lg 5,32(1)\n\t" \
- "lg 6,40(1)\n\t" \
- "mvc 160(8,15), 48(1)\n\t" \
- "mvc 168(8,15), 56(1)\n\t" \
- "mvc 176(8,15), 64(1)\n\t" \
- "mvc 184(8,15), 72(1)\n\t" \
- "mvc 192(8,15), 80(1)\n\t" \
- "mvc 200(8,15), 88(1)\n\t" \
- "lg 1, 0(1)\n\t" \
- VALGRIND_CALL_NOREDIR_R1 \
- "lgr %0, 2\n\t" \
- "aghi 15,208\n\t" \
- VALGRIND_CFI_EPILOGUE \
- : /*out*/ "=d" (_res) \
- : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"6","7" \
- ); \
- lval = (__typeof__(lval)) _res; \
+#define CALL_FN_W_9W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7,arg8,arg9) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[10]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ _argvec[6] = (unsigned long)(arg6); \
+ _argvec[7] = (unsigned long)(arg7); \
+ _argvec[8] = (unsigned long)(arg8); \
+ _argvec[9] = (unsigned long)(arg9); \
+ __asm__ volatile( \
+ "dsubu $29, $29, 8\n\t" \
+ "ld $4, 72(%1)\n\t" \
+ "sd $4, 0($29)\n\t" \
+ "ld $4, 8(%1)\n\t" \
+ "ld $5, 16(%1)\n\t" \
+ "ld $6, 24(%1)\n\t" \
+ "ld $7, 32(%1)\n\t" \
+ "ld $8, 40(%1)\n\t" \
+ "ld $9, 48(%1)\n\t" \
+ "ld $10, 56(%1)\n\t" \
+ "ld $11, 64(%1)\n\t" \
+ "ld $25, 0(%1)\n\t" /* target->t9 */ \
+ VALGRIND_CALL_NOREDIR_T9 \
+ "daddu $29, $29, 8\n\t" \
+ "move %0, $2\n" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[0]) \
+ : /*trash*/ "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
} while (0)
-#define CALL_FN_W_12W(lval, orig, arg1, arg2, arg3, arg4, arg5, \
- arg6, arg7 ,arg8, arg9, arg10, arg11, arg12)\
- do { \
- volatile OrigFn _orig = (orig); \
- volatile unsigned long _argvec[13]; \
- volatile unsigned long _res; \
- _argvec[0] = (unsigned long)_orig.nraddr; \
- _argvec[1] = (unsigned long)arg1; \
- _argvec[2] = (unsigned long)arg2; \
- _argvec[3] = (unsigned long)arg3; \
- _argvec[4] = (unsigned long)arg4; \
- _argvec[5] = (unsigned long)arg5; \
- _argvec[6] = (unsigned long)arg6; \
- _argvec[7] = (unsigned long)arg7; \
- _argvec[8] = (unsigned long)arg8; \
- _argvec[9] = (unsigned long)arg9; \
- _argvec[10] = (unsigned long)arg10; \
- _argvec[11] = (unsigned long)arg11; \
- _argvec[12] = (unsigned long)arg12; \
- __asm__ volatile( \
- VALGRIND_CFI_PROLOGUE \
- "aghi 15,-216\n\t" \
- "lg 2, 8(1)\n\t" \
- "lg 3,16(1)\n\t" \
- "lg 4,24(1)\n\t" \
- "lg 5,32(1)\n\t" \
- "lg 6,40(1)\n\t" \
- "mvc 160(8,15), 48(1)\n\t" \
- "mvc 168(8,15), 56(1)\n\t" \
- "mvc 176(8,15), 64(1)\n\t" \
- "mvc 184(8,15), 72(1)\n\t" \
- "mvc 192(8,15), 80(1)\n\t" \
- "mvc 200(8,15), 88(1)\n\t" \
- "mvc 208(8,15), 96(1)\n\t" \
- "lg 1, 0(1)\n\t" \
- VALGRIND_CALL_NOREDIR_R1 \
- "lgr %0, 2\n\t" \
- "aghi 15,216\n\t" \
- VALGRIND_CFI_EPILOGUE \
- : /*out*/ "=d" (_res) \
- : /*in*/ "a" (&_argvec[0]) __FRAME_POINTER \
- : /*trash*/ "cc", "memory", __CALLER_SAVED_REGS,"6","7" \
- ); \
- lval = (__typeof__(lval)) _res; \
+#define CALL_FN_W_10W(lval, orig, arg1,arg2,arg3,arg4,arg5,arg6, \
+ arg7,arg8,arg9,arg10) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[11]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ _argvec[6] = (unsigned long)(arg6); \
+ _argvec[7] = (unsigned long)(arg7); \
+ _argvec[8] = (unsigned long)(arg8); \
+ _argvec[9] = (unsigned long)(arg9); \
+ _argvec[10] = (unsigned long)(arg10); \
+ __asm__ volatile( \
+ "dsubu $29, $29, 16\n\t" \
+ "ld $4, 72(%1)\n\t" \
+ "sd $4, 0($29)\n\t" \
+ "ld $4, 80(%1)\n\t" \
+ "sd $4, 8($29)\n\t" \
+ "ld $4, 8(%1)\n\t" \
+ "ld $5, 16(%1)\n\t" \
+ "ld $6, 24(%1)\n\t" \
+ "ld $7, 32(%1)\n\t" \
+ "ld $8, 40(%1)\n\t" \
+ "ld $9, 48(%1)\n\t" \
+ "ld $10, 56(%1)\n\t" \
+ "ld $11, 64(%1)\n\t" \
+ "ld $25, 0(%1)\n\t" /* target->t9 */ \
+ VALGRIND_CALL_NOREDIR_T9 \
+ "daddu $29, $29, 16\n\t" \
+ "move %0, $2\n" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[0]) \
+ : /*trash*/ "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
+
+#define CALL_FN_W_11W(lval, orig, arg1,arg2,arg3,arg4,arg5, \
+ arg6,arg7,arg8,arg9,arg10, \
+ arg11) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[12]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ _argvec[6] = (unsigned long)(arg6); \
+ _argvec[7] = (unsigned long)(arg7); \
+ _argvec[8] = (unsigned long)(arg8); \
+ _argvec[9] = (unsigned long)(arg9); \
+ _argvec[10] = (unsigned long)(arg10); \
+ _argvec[11] = (unsigned long)(arg11); \
+ __asm__ volatile( \
+ "dsubu $29, $29, 24\n\t" \
+ "ld $4, 72(%1)\n\t" \
+ "sd $4, 0($29)\n\t" \
+ "ld $4, 80(%1)\n\t" \
+ "sd $4, 8($29)\n\t" \
+ "ld $4, 88(%1)\n\t" \
+ "sd $4, 16($29)\n\t" \
+ "ld $4, 8(%1)\n\t" \
+ "ld $5, 16(%1)\n\t" \
+ "ld $6, 24(%1)\n\t" \
+ "ld $7, 32(%1)\n\t" \
+ "ld $8, 40(%1)\n\t" \
+ "ld $9, 48(%1)\n\t" \
+ "ld $10, 56(%1)\n\t" \
+ "ld $11, 64(%1)\n\t" \
+ "ld $25, 0(%1)\n\t" /* target->t9 */ \
+ VALGRIND_CALL_NOREDIR_T9 \
+ "daddu $29, $29, 24\n\t" \
+ "move %0, $2\n" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[0]) \
+ : /*trash*/ "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
} while (0)
+#define CALL_FN_W_12W(lval, orig, arg1,arg2,arg3,arg4,arg5, \
+ arg6,arg7,arg8,arg9,arg10, \
+ arg11,arg12) \
+ do { \
+ volatile OrigFn _orig = (orig); \
+ volatile unsigned long _argvec[13]; \
+ volatile unsigned long _res; \
+ _argvec[0] = (unsigned long)_orig.nraddr; \
+ _argvec[1] = (unsigned long)(arg1); \
+ _argvec[2] = (unsigned long)(arg2); \
+ _argvec[3] = (unsigned long)(arg3); \
+ _argvec[4] = (unsigned long)(arg4); \
+ _argvec[5] = (unsigned long)(arg5); \
+ _argvec[6] = (unsigned long)(arg6); \
+ _argvec[7] = (unsigned long)(arg7); \
+ _argvec[8] = (unsigned long)(arg8); \
+ _argvec[9] = (unsigned long)(arg9); \
+ _argvec[10] = (unsigned long)(arg10); \
+ _argvec[11] = (unsigned long)(arg11); \
+ _argvec[12] = (unsigned long)(arg12); \
+ __asm__ volatile( \
+ "dsubu $29, $29, 32\n\t" \
+ "ld $4, 72(%1)\n\t" \
+ "sd $4, 0($29)\n\t" \
+ "ld $4, 80(%1)\n\t" \
+ "sd $4, 8($29)\n\t" \
+ "ld $4, 88(%1)\n\t" \
+ "sd $4, 16($29)\n\t" \
+ "ld $4, 96(%1)\n\t" \
+ "sd $4, 24($29)\n\t" \
+ "ld $4, 8(%1)\n\t" \
+ "ld $5, 16(%1)\n\t" \
+ "ld $6, 24(%1)\n\t" \
+ "ld $7, 32(%1)\n\t" \
+ "ld $8, 40(%1)\n\t" \
+ "ld $9, 48(%1)\n\t" \
+ "ld $10, 56(%1)\n\t" \
+ "ld $11, 64(%1)\n\t" \
+ "ld $25, 0(%1)\n\t" /* target->t9 */ \
+ VALGRIND_CALL_NOREDIR_T9 \
+ "daddu $29, $29, 32\n\t" \
+ "move %0, $2\n" \
+ : /*out*/ "=r" (_res) \
+ : /*in*/ "r" (&_argvec[0]) \
+ : /*trash*/ "memory", __CALLER_SAVED_REGS \
+ ); \
+ lval = (__typeof__(lval)) _res; \
+ } while (0)
-#endif /* PLAT_s390x_linux */
+#endif /* PLAT_mips64_linux */
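/* Illustrative sketch, not part of the patch: the CALL_FN_W_* macros
   above are normally reached through a function wrapper built with the
   wrapping macros defined earlier in this header.  The wrapped function
   "foo9" and the soname pattern "libfoo.so*" are hypothetical.

   // Wrapper for a 9-argument int foo9(...) exported by "libfoo.so*"
   // (Z-encoded as libfooZdsoZa: Zd = '.', Za = '*').
   int I_WRAP_SONAME_FN_ZU(libfooZdsoZa, foo9)
           (int a1, int a2, int a3, int a4, int a5,
            int a6, int a7, int a8, int a9)
   {
      int    r;
      OrigFn fn;
      VALGRIND_GET_ORIG_FN(fn);       // fetch the non-redirected target
      CALL_FN_W_9W(r, fn, a1,a2,a3,a4,a5,a6,a7,a8,a9);
      return r;
   }
*/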
/* ------------------------------------------------------------------ */
errors. */
VG_USERREQ__COUNT_ERRORS = 0x1201,
- /* Allows a string (gdb monitor command) to be passed to the tool
- Used for interaction with vgdb/gdb */
+ /* Allows the client program and/or gdbserver to execute a monitor
+ command. */
VG_USERREQ__GDB_MONITOR_COMMAND = 0x1202,
/* These are useful and can be interpreted by any tool that
disablement indicator. Hence 1 disables or further
disables errors, and -1 moves back towards enablement.
Other values are not allowed. */
- VG_USERREQ__CHANGE_ERR_DISABLEMENT = 0x1801
+ VG_USERREQ__CHANGE_ERR_DISABLEMENT = 0x1801,
+
+ /* Initialise IR injection */
+ VG_USERREQ__VEX_INIT_FOR_IRI = 0x1901
} Vg_ClientRequest;
#if !defined(__GNUC__)
is the number of characters printed, excluding the "**<pid>** " part at the
start and the backtrace (if present). */
-#if defined(__GNUC__) || defined(__INTEL_COMPILER)
+#if defined(__GNUC__) || defined(__INTEL_COMPILER) && !defined(_MSC_VER)
/* Modern GCC will optimize the static routine out if unused,
and unused attribute will shut down warnings about it. */
static int VALGRIND_PRINTF(const char *format, ...)
#if defined(NVALGRIND)
return 0;
#else /* NVALGRIND */
-#if defined(_MSC_VER) || defined(PLAT_amd64_win64)
+#if defined(_MSC_VER) || defined(__MINGW64__)
uintptr_t _qzz_res;
#else
unsigned long _qzz_res;
#endif
va_list vargs;
va_start(vargs, format);
-#if defined(_MSC_VER) || defined(PLAT_amd64_win64)
+#if defined(_MSC_VER) || defined(__MINGW64__)
_qzz_res = VALGRIND_DO_CLIENT_REQUEST_EXPR(0,
VG_USERREQ__PRINTF_VALIST_BY_REF,
(uintptr_t)format,
#endif /* NVALGRIND */
}
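/* Illustrative usage, not part of the patch: VALGRIND_PRINTF only
   produces output when running under Valgrind; natively (or with
   NVALGRIND defined) the request is a no-op and 0 is returned.
   "nbufs" is a hypothetical variable.

   int n = VALGRIND_PRINTF("allocated %d buffers\n", nbufs);
   // n = characters printed, excluding the "**<pid>** " prefix,
   // or 0 when not running under Valgrind.
*/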
-#if defined(__GNUC__) || defined(__INTEL_COMPILER)
+#if defined(__GNUC__) || defined(__INTEL_COMPILER) && !defined(_MSC_VER)
static int VALGRIND_PRINTF_BACKTRACE(const char *format, ...)
__attribute__((format(__printf__, 1, 2), __unused__));
#endif
#if defined(NVALGRIND)
return 0;
#else /* NVALGRIND */
-#if defined(_MSC_VER) || defined(PLAT_amd64_win64)
+#if defined(_MSC_VER) || defined(__MINGW64__)
uintptr_t _qzz_res;
#else
unsigned long _qzz_res;
#endif
va_list vargs;
va_start(vargs, format);
-#if defined(_MSC_VER) || defined(PLAT_amd64_win64)
+#if defined(_MSC_VER) || defined(__MINGW64__)
_qzz_res = VALGRIND_DO_CLIENT_REQUEST_EXPR(0,
VG_USERREQ__PRINTF_BACKTRACE_VALIST_BY_REF,
(uintptr_t)format,
VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__CHANGE_ERR_DISABLEMENT, \
-1, 0, 0, 0, 0)
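/* Illustrative usage, not part of the patch: the disable/enable requests
   nest (each disable moves the indicator by 1 towards disablement, each
   enable moves it back), so a known-noisy region can be silenced without
   affecting reporting elsewhere.  touch_uninitialised_memory() is a
   hypothetical stand-in.

   VALGRIND_DISABLE_ERROR_REPORTING;
   touch_uninitialised_memory();      // errors here are not reported
   VALGRIND_ENABLE_ERROR_REPORTING;   // restore the previous behaviour
*/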
+/* Execute a monitor command from the client program.
+   If a connection is open with GDB, the output is sent according to
+   the output mode set for vgdb; if no connection is open, the output
+   goes to the log output.
+   Returns 1 if the command was not recognised, 0 otherwise. */
+#define VALGRIND_MONITOR_COMMAND(command) \
+ VALGRIND_DO_CLIENT_REQUEST_EXPR(0, VG_USERREQ__GDB_MONITOR_COMMAND, \
+ command, 0, 0, 0, 0)
+
+
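/* Illustrative usage, not part of the patch: run a tool monitor command
   from inside the client, e.g. Memcheck's "leak_check" (which commands
   are accepted depends on the tool in use).

   if (VALGRIND_MONITOR_COMMAND("leak_check summary"))
      VALGRIND_PRINTF("monitor command not recognised by the tool\n");
*/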
#undef PLAT_x86_darwin
#undef PLAT_amd64_darwin
#undef PLAT_x86_win32
+#undef PLAT_amd64_win64
#undef PLAT_x86_linux
#undef PLAT_amd64_linux
#undef PLAT_ppc32_linux
#undef PLAT_ppc64_linux
#undef PLAT_arm_linux
#undef PLAT_s390x_linux
+#undef PLAT_mips32_linux
+#undef PLAT_mips64_linux
#endif /* __VALGRIND_H */