forked from Minki/linux
d59fe3f13d
While the description of the commit that originally introduced asmlinkage_protect() validly says that this doesn't guarantee clobbering of the function arguments, using "m" constraints rather than "g" ones reduces the risk (by making it less attractive to the compiler to move those variables into registers) and generally results in better code (because we know the arguments are in memory anyway, and are frequently - if not always - used just once, with the second [compiler visible] use in asmlinkage_protect() itself being a fake one). Signed-off-by: Jan Beulich <jbeulich@suse.com> Cc: <roland@hack.frob.com> Cc: <viro@zeniv.linux.org.uk> Cc: Linus Torvalds <torvalds@linux-foundation.org> Cc: Andrew Morton <akpm@linux-foundation.org> Link: http://lkml.kernel.org/r/50FE84EC02000078000B83B7@nat28.tlf.novell.com Signed-off-by: Ingo Molnar <mingo@kernel.org>
62 lines
2.0 KiB
C
#ifndef _ASM_X86_LINKAGE_H
#define _ASM_X86_LINKAGE_H

#include <linux/stringify.h>

/*
 * notrace: exclude a function from -finstrument-functions profiling
 * hooks (__cyg_profile_func_enter/exit).
 */
#undef notrace
#define notrace __attribute__((no_instrument_function))

#ifdef CONFIG_X86_32
/* On 32-bit x86, asmlinkage forces all arguments onto the stack. */
#define asmlinkage CPP_ASMLINKAGE __attribute__((regparm(0)))

/*
 * Make sure the compiler doesn't do anything stupid with the
 * arguments on the stack - they are owned by the *caller*, not
 * the callee. This just fools gcc into not spilling into them,
 * and keeps it from doing tailcall recursion and/or using the
 * stack slots for temporaries, since they are live and "used"
 * all the way to the end of the function.
 *
 * NOTE! On x86-64, all the arguments are in registers, so this
 * only matters on a 32-bit kernel.
 *
 * The "m" constraints (rather than "g") make it unattractive for
 * the compiler to move the arguments into registers: we know they
 * live in memory (on the stack) anyway, so this generally yields
 * better code while still marking them live.
 */
#define asmlinkage_protect(n, ret, args...) \
	__asmlinkage_protect##n(ret, ##args)
#define __asmlinkage_protect_n(ret, args...) \
	__asm__ __volatile__ ("" : "=r" (ret) : "0" (ret), ##args)
#define __asmlinkage_protect0(ret) \
	__asmlinkage_protect_n(ret)
#define __asmlinkage_protect1(ret, arg1) \
	__asmlinkage_protect_n(ret, "m" (arg1))
#define __asmlinkage_protect2(ret, arg1, arg2) \
	__asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2))
#define __asmlinkage_protect3(ret, arg1, arg2, arg3) \
	__asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2), "m" (arg3))
#define __asmlinkage_protect4(ret, arg1, arg2, arg3, arg4) \
	__asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2), "m" (arg3), \
			      "m" (arg4))
#define __asmlinkage_protect5(ret, arg1, arg2, arg3, arg4, arg5) \
	__asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2), "m" (arg3), \
			      "m" (arg4), "m" (arg5))
#define __asmlinkage_protect6(ret, arg1, arg2, arg3, arg4, arg5, arg6) \
	__asmlinkage_protect_n(ret, "m" (arg1), "m" (arg2), "m" (arg3), \
			      "m" (arg4), "m" (arg5), "m" (arg6))

#endif /* CONFIG_X86_32 */

#ifdef __ASSEMBLY__

/* Define a global symbol at the current position in an .S file. */
#define GLOBAL(name)	\
	.globl name;	\
	name:

#if defined(CONFIG_X86_64) || defined(CONFIG_X86_ALIGNMENT_16)
/* Align to 16 bytes, padding with 0x90 (NOP) bytes. */
#define __ALIGN		.p2align 4, 0x90
#define __ALIGN_STR	__stringify(__ALIGN)
#endif

#endif /* __ASSEMBLY__ */

#endif /* _ASM_X86_LINKAGE_H */