mirror of
https://github.com/torvalds/linux.git
synced 2024-11-17 17:41:44 +00:00
0909c8c2d5
This patch reworks the feature fixup mechanism so vdso's can be fixed up. The main issue was that the construct: .long label (or .llong on 64 bits) will not work in the case of a shared library like the vdso. It will generate an empty placeholder in the fixup table along with a reloc, which is not something we can deal with in the vdso. The idea here (thanks Alan Modra !) is to instead use something like: 1: .long label - 1b That is, the feature fixup tables no longer contain addresses of bits of code to patch, but offsets of such code from the fixup table entry itself. That is properly resolved by ld when building the .so's. I've modified the fixup mechanism generically to use that method for the rest of the kernel as well. Another trick is that the 32 bits vDSO included in the 64 bits kernel needs to have a table in the 64 bits format. However, gas does not support 32 bits code with a statement of the form: .llong label - 1b (Or even just .llong label) That is, it cannot emit the right fixup/relocation for the linker to use to assign a 32 bits address to an .llong field. Thus, in the specific case of the 32 bits vdso built as part of the 64 bits kernel, we are using a modified macro that generates: .long 0xffffffff .llong label - 1b Note that this assumes that the value is negative, which is enforced by the .lds (those offsets are always negative as the .text is always before the fixup table and gas doesn't support emitting the reloc the other way around). Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org> Signed-off-by: Paul Mackerras <paulus@samba.org>
109 lines
2.9 KiB
C
109 lines
2.9 KiB
C
#ifndef _ASM_POWERPC_ASM_COMPAT_H
|
|
#define _ASM_POWERPC_ASM_COMPAT_H
|
|
|
|
#include <asm/types.h>
|
|
|
|
/*
 * stringify_in_c() and ASM_CONST() allow the same macro definitions to be
 * shared between assembler source (where the tokens are used directly) and
 * C source (where the tokens must become string literals for inline asm,
 * and constants want an explicit UL type).
 */
#ifdef __ASSEMBLY__
/* Assembler: pass the tokens through unchanged. */
#  define stringify_in_c(...)	__VA_ARGS__
#  define ASM_CONST(x)		x
#else
/* This version of stringify will deal with commas... */
#  define __stringify_in_c(...)	#__VA_ARGS__
/* Trailing " " lets stringified fragments be pasted safely in inline asm. */
#  define stringify_in_c(...)	__stringify_in_c(__VA_ARGS__) " "
/* Two-level expansion so macro arguments are expanded before pasting UL. */
#  define __ASM_CONST(x)	x##UL
#  define ASM_CONST(x)		__ASM_CONST(x)
#endif
|
/*
 * Feature section common macros
 *
 * Note that the entries now contain offsets between the table entry
 * and the code rather than absolute code pointers in order to be
 * usable with the vdso shared library.  There is also an assumption
 * that values will be negative, that is, the fixup table has to be
 * located after the code it fixes up.
 */
#ifdef CONFIG_PPC64
#ifdef __powerpc64__
/* 64 bits kernel, 64 bits code */
#define MAKE_FTR_SECTION_ENTRY(msk, val, label, sect)		\
99:								\
	.section sect,"a";					\
	.align 3;						\
98:								\
	.llong msk;						\
	.llong val;						\
	.llong label##b-98b;					\
	.llong 99b-98b;						\
	.previous
#else /* __powerpc64__ */
/*
 * 64 bits kernel, 32 bits code (ie. vdso32)
 *
 * gas cannot emit ".llong label-98b" from 32-bit code, so each 64-bit
 * offset field is assembled by hand: a 0xffffffff high word (the offset
 * is known to be negative, enforced by the linker script) followed by
 * the 32-bit offset as the low word.
 */
#define MAKE_FTR_SECTION_ENTRY(msk, val, label, sect)		\
99:								\
	.section sect,"a";					\
	.align 3;						\
98:								\
	.llong msk;						\
	.llong val;						\
	.long 0xffffffff;					\
	.long label##b-98b;					\
	.long 0xffffffff;					\
	.long 99b-98b;						\
	.previous
#endif /* !__powerpc64__ */
#else /* CONFIG_PPC64 */
/* 32 bits kernel, 32 bits code */
#define MAKE_FTR_SECTION_ENTRY(msk, val, label, sect)		\
99:								\
	.section sect,"a";					\
	.align 2;						\
98:								\
	.long msk;						\
	.long val;						\
	.long label##b-98b;					\
	.long 99b-98b;						\
	.previous
#endif /* !CONFIG_PPC64 */
/*
 * Size-agnostic mnemonics for operating on longs and pointers: the
 * doubleword forms on 64-bit, the word forms on 32-bit.  Each expands
 * via stringify_in_c() so it can be used both from assembler source
 * and inside C inline-asm strings.
 */
#ifdef __powerpc64__

/* operations for longs and pointers */
#define PPC_LL		stringify_in_c(ld)	/* load long */
#define PPC_STL		stringify_in_c(std)	/* store long */
#define PPC_LCMPI	stringify_in_c(cmpdi)	/* compare immediate */
#define PPC_LONG	stringify_in_c(.llong)	/* pointer-sized data directive */
#define PPC_TLNEI	stringify_in_c(tdnei)	/* trap if not equal immediate */
#define PPC_LLARX	stringify_in_c(ldarx)	/* load with reservation */
#define PPC_STLCX	stringify_in_c(stdcx.)	/* store conditional */
#define PPC_CNTLZL	stringify_in_c(cntlzd)	/* count leading zeros */

#else /* 32-bit */

/* operations for longs and pointers */
#define PPC_LL		stringify_in_c(lwz)
#define PPC_STL		stringify_in_c(stw)
#define PPC_LCMPI	stringify_in_c(cmpwi)
#define PPC_LONG	stringify_in_c(.long)
#define PPC_TLNEI	stringify_in_c(twnei)
#define PPC_LLARX	stringify_in_c(lwarx)
#define PPC_STLCX	stringify_in_c(stwcx.)
#define PPC_CNTLZL	stringify_in_c(cntlzw)

#endif
#ifdef __KERNEL__
#ifdef CONFIG_IBM405_ERR77
/* Erratum #77 on the 405 means we need a sync or dcbt before every
 * stwcx.  The old ATOMIC_SYNC_FIX covered some but not all of this.
 */
#define PPC405_ERR77(ra,rb)	stringify_in_c(dcbt	ra, rb;)
#define PPC405_ERR77_SYNC	stringify_in_c(sync;)
#else
/* Unaffected CPUs: the workaround macros expand to nothing. */
#define PPC405_ERR77(ra,rb)
#define PPC405_ERR77_SYNC
#endif
#endif
|
|
|
|
#endif /* _ASM_POWERPC_ASM_COMPAT_H */
|