From 42c4aaadb737e0e672b3fb86b2c41ff59f0fb8bc Mon Sep 17 00:00:00 2001
From: Benjamin Herrenschmidt
Date: Tue, 24 Oct 2006 16:42:40 +1000
Subject: [POWERPC] Consolidate feature fixup code

There are currently two versions of the functions for applying the
feature fixups: one for CPU features and one for firmware features. In
addition, both are written in assembly, with separate implementations
for 32 and 64 bits. identify_cpu() is also implemented in assembly,
again separately for 32 and 64 bits.

This patch replaces them with a pair of C functions. The call sites on
ppc64 also move slightly so the functions are called from C instead of
from assembly; it's a very small change, and thus shouldn't cause any
problem.

Signed-off-by: Benjamin Herrenschmidt
Acked-by: Olof Johansson
Signed-off-by: Paul Mackerras
---
 include/asm-powerpc/cputable.h | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)

(limited to 'include/asm-powerpc/cputable.h')

diff --git a/include/asm-powerpc/cputable.h b/include/asm-powerpc/cputable.h
index 12707ab9dc98..4d22218739e0 100644
--- a/include/asm-powerpc/cputable.h
+++ b/include/asm-powerpc/cputable.h
@@ -89,8 +89,11 @@ struct cpu_spec {
 
 extern struct cpu_spec *cur_cpu_spec;
 
-extern void identify_cpu(unsigned long offset, unsigned long cpu);
-extern void do_cpu_ftr_fixups(unsigned long offset);
+extern unsigned int __start___ftr_fixup, __stop___ftr_fixup;
+
+extern struct cpu_spec *identify_cpu(unsigned long offset);
+extern void do_feature_fixups(unsigned long offset, unsigned long value,
+			      void *fixup_start, void *fixup_end);
 
 #endif /* __ASSEMBLY__ */
-- cgit
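For context, here is a minimal sketch of what such a consolidated C fixup
routine can look like, written against the declaration added above. The
entry layout mirrors the records the assembler macros emit into the
__ftr_fixup section (mask, value, start, end); the loop body, the struct
name, and the omission of the instruction-cache flush sequence are
illustrative assumptions, not the verbatim kernel code:

/* Sketch: one record per BEGIN/END_FTR_SECTION pair, as laid out
 * in the __ftr_fixup section by the assembler macros.
 */
struct fixup_entry {
	unsigned long	mask;
	unsigned long	value;
	unsigned int	*start;		/* first instruction to patch */
	unsigned int	*end;		/* one past the last instruction */
};

void do_feature_fixups(unsigned long offset, unsigned long value,
		       void *fixup_start, void *fixup_end)
{
	struct fixup_entry *fcur = fixup_start, *fend = fixup_end;

	for (; fcur < fend; fcur++) {
		unsigned int *p;

		/* Feature present and matching: keep the code as-is. */
		if ((value & fcur->mask) == fcur->value)
			continue;

		/* Section not wanted: overwrite it with nops. The
		 * offset relocates the table addresses when running
		 * at a different address than the kernel was linked
		 * at (i-cache flushing omitted for brevity).
		 */
		for (p = fcur->start + offset / 4;
		     p < fcur->end + offset / 4; p++)
			*p = 0x60000000;	/* ppc "nop" */
	}
}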
From 7aeb732428fc8e2ecae6d432873770c12f04a979 Mon Sep 17 00:00:00 2001
From: Benjamin Herrenschmidt
Date: Fri, 20 Oct 2006 11:47:16 +1000
Subject: [POWERPC] Support nested cpu feature sections

This patch adds some macros that can be used with an explicit label in
order to nest cpu feature sections. This should be used very carefully,
but it is necessary for the upcoming cell TB fixup.

Signed-off-by: Benjamin Herrenschmidt
Acked-by: Olof Johansson
Signed-off-by: Paul Mackerras
---
 include/asm-powerpc/cputable.h | 14 +++++++++-----
 1 file changed, 9 insertions(+), 5 deletions(-)

(limited to 'include/asm-powerpc/cputable.h')

diff --git a/include/asm-powerpc/cputable.h b/include/asm-powerpc/cputable.h
index 4d22218739e0..65faf322ace0 100644
--- a/include/asm-powerpc/cputable.h
+++ b/include/asm-powerpc/cputable.h
@@ -434,30 +434,34 @@ static inline int cpu_has_feature(unsigned long feature)
 
 #ifdef __ASSEMBLY__
 
-#define BEGIN_FTR_SECTION		98:
+#define BEGIN_FTR_SECTION_NESTED(label)	label:
+#define BEGIN_FTR_SECTION		BEGIN_FTR_SECTION_NESTED(98)
 
 #ifndef __powerpc64__
-#define END_FTR_SECTION(msk, val)		\
+#define END_FTR_SECTION_NESTED(msk, val, label) \
 99:						\
 	.section __ftr_fixup,"a";		\
 	.align 2;				\
 	.long msk;				\
 	.long val;				\
-	.long 98b;				\
+	.long label##b;				\
 	.long 99b;				\
 	.previous
 #else /* __powerpc64__ */
-#define END_FTR_SECTION(msk, val)		\
+#define END_FTR_SECTION_NESTED(msk, val, label) \
 99:						\
 	.section __ftr_fixup,"a";		\
 	.align 3;				\
 	.llong msk;				\
 	.llong val;				\
-	.llong 98b;				\
+	.llong label##b;			\
 	.llong 99b;				\
 	.previous
 #endif /* __powerpc64__ */
 
+#define END_FTR_SECTION(msk, val)		\
+	END_FTR_SECTION_NESTED(msk, val, 98)
+
 #define END_FTR_SECTION_IFSET(msk)	END_FTR_SECTION((msk), (msk))
 #define END_FTR_SECTION_IFCLR(msk)	END_FTR_SECTION((msk), 0)
 
 #endif /* __ASSEMBLY__ */
-- cgit
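To illustrate how the nesting is meant to be used: the inner pair takes
an explicit local label (96 here) so its fixup record doesn't collide
with the outer section's default 98/99 labels, and each section is then
patched independently by do_feature_fixups(). The instruction sequence
below is a made-up sketch (the register choice and the outer feature bit
are assumptions); only the macro usage pattern is the point:

BEGIN_FTR_SECTION
90:	mftb	r4		/* outer section: read the timebase */
BEGIN_FTR_SECTION_NESTED(96)
	cmpwi	r4,0		/* inner section: patched on its own */
	beq-	90b
END_FTR_SECTION_NESTED(CPU_FTR_CELL_TB_BUG, CPU_FTR_CELL_TB_BUG, 96)
END_FTR_SECTION_IFSET(CPU_FTR_USE_TB)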
From 0909c8c2d547e45ca50e2492b08ec93a37b35237 Mon Sep 17 00:00:00 2001
From: Benjamin Herrenschmidt
Date: Fri, 20 Oct 2006 11:47:18 +1000
Subject: [POWERPC] Support feature fixups in vdso's

This patch reworks the feature fixup mechanism so vdso's can be fixed
up. The main issue was that the construct:

	.long label	(or .llong on 64 bits)

will not work in the case of a shared library like the vdso. It
generates an empty placeholder in the fixup table along with a reloc,
which is not something we can deal with in the vdso.

The idea here (thanks Alan Modra!) is to instead use something like:

1:	.long label - 1b

That is, the feature fixup tables no longer contain addresses of bits
of code to patch, but offsets of such code from the fixup table entry
itself. That is properly resolved by ld when building the .so's. I've
modified the fixup mechanism generically to use that method for the
rest of the kernel as well.

Another trick is that the 32 bits vDSO included in the 64 bits kernel
needs to have a table in the 64 bits format. However, gas does not
support 32 bits code with a statement of the form:

	.llong label - 1b	(or even just .llong label)

That is, it cannot emit the right fixup/relocation for the linker to
use to assign a 32 bits address to an .llong field. Thus, in the
specific case of the 32 bits vdso built as part of the 64 bits kernel,
we use a modified macro that generates:

	.long 0xffffffff
	.llong label - 1b

Note that this assumes the value is negative, which is enforced by the
.lds (those offsets are always negative as the .text is always before
the fixup table, and gas doesn't support emitting the reloc the other
way around).

Signed-off-by: Benjamin Herrenschmidt
Signed-off-by: Paul Mackerras
---
 include/asm-powerpc/cputable.h | 31 +++++--------------------------
 1 file changed, 5 insertions(+), 26 deletions(-)

(limited to 'include/asm-powerpc/cputable.h')

diff --git a/include/asm-powerpc/cputable.h b/include/asm-powerpc/cputable.h
index 65faf322ace0..02e52d68cbbe 100644
--- a/include/asm-powerpc/cputable.h
+++ b/include/asm-powerpc/cputable.h
@@ -92,8 +92,8 @@ extern struct cpu_spec *cur_cpu_spec;
 extern unsigned int __start___ftr_fixup, __stop___ftr_fixup;
 
 extern struct cpu_spec *identify_cpu(unsigned long offset);
-extern void do_feature_fixups(unsigned long offset, unsigned long value,
-			      void *fixup_start, void *fixup_end);
+extern void do_feature_fixups(unsigned long value, void *fixup_start,
+			      void *fixup_end);
 
 #endif /* __ASSEMBLY__ */
 
@@ -435,32 +435,11 @@ static inline int cpu_has_feature(unsigned long feature)
 #ifdef __ASSEMBLY__
 
 #define BEGIN_FTR_SECTION_NESTED(label)	label:
-#define BEGIN_FTR_SECTION		BEGIN_FTR_SECTION_NESTED(98)
-
-#ifndef __powerpc64__
-#define END_FTR_SECTION_NESTED(msk, val, label) \
-99:						\
-	.section __ftr_fixup,"a";		\
-	.align 2;				\
-	.long msk;				\
-	.long val;				\
-	.long label##b;				\
-	.long 99b;				\
-	.previous
-#else /* __powerpc64__ */
+#define BEGIN_FTR_SECTION		BEGIN_FTR_SECTION_NESTED(97)
 #define END_FTR_SECTION_NESTED(msk, val, label) \
-99:						\
-	.section __ftr_fixup,"a";		\
-	.align 3;				\
-	.llong msk;				\
-	.llong val;				\
-	.llong label##b;			\
-	.llong 99b;				\
-	.previous
-#endif /* __powerpc64__ */
-
+	MAKE_FTR_SECTION_ENTRY(msk, val, label, __ftr_fixup)
 #define END_FTR_SECTION(msk, val)		\
-	END_FTR_SECTION_NESTED(msk, val, 98)
+	END_FTR_SECTION_NESTED(msk, val, 97)
 
 #define END_FTR_SECTION_IFSET(msk)	END_FTR_SECTION((msk), (msk))
 #define END_FTR_SECTION_IFCLR(msk)	END_FTR_SECTION((msk), 0)
-- cgit
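As a hedged sketch of what the offset-based scheme implies on the C
side: each table entry now stores signed offsets from its own address,
so resolving an entry costs one addition and needs no relocations in a
shared object like the vdso. The struct name and the calc_addr helper
are illustrative, not necessarily the kernel's own names:

/* Sketch: entries carry entry-relative offsets instead of
 * absolute addresses of the code to patch.
 */
struct fixup_entry {
	unsigned long	mask;
	unsigned long	value;
	long		start_off;	/* offset of first patched insn */
	long		end_off;	/* offset one past the last insn */
};

/* Resolve an entry-relative offset (negative here, since .text
 * precedes the fixup table) to a real address.
 */
static unsigned int *calc_addr(struct fixup_entry *fcur, long offset)
{
	return (unsigned int *)((unsigned long)fcur + offset);
}

void do_feature_fixups(unsigned long value, void *fixup_start,
		       void *fixup_end)
{
	struct fixup_entry *fcur = fixup_start, *fend = fixup_end;

	for (; fcur < fend; fcur++) {
		unsigned int *p;

		if ((value & fcur->mask) == fcur->value)
			continue;

		/* nop out the unwanted section (cache flush omitted) */
		for (p = calc_addr(fcur, fcur->start_off);
		     p < calc_addr(fcur, fcur->end_off); p++)
			*p = 0x60000000;	/* ppc "nop" */
	}
}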
From 859deea949c382d9ccb6397fe33df3703ecef45d Mon Sep 17 00:00:00 2001
From: Benjamin Herrenschmidt
Date: Fri, 20 Oct 2006 14:37:05 +1000
Subject: [POWERPC] Cell timebase bug workaround

The Cell CPU timebase has an erratum. When reading the entire 64 bits
of the timebase with one mftb instruction, there is a window of a
handful of cycles during which one might read a value whose low order
32 bits have already been reset to 0x00000000 but whose high order bits
have not yet been incremented by one. This fixes it by reading the
timebase again until the low order 32 bits are no longer 0. That might
introduce occasional latencies if mftb is hit just at the wrong time,
but no more than 70ns on a cell blade, and that was considered
acceptable.

Signed-off-by: Benjamin Herrenschmidt
Acked-by: Olof Johansson
Signed-off-by: Paul Mackerras
---
 include/asm-powerpc/cputable.h | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

(limited to 'include/asm-powerpc/cputable.h')

diff --git a/include/asm-powerpc/cputable.h b/include/asm-powerpc/cputable.h
index 02e52d68cbbe..a9a40149a7c0 100644
--- a/include/asm-powerpc/cputable.h
+++ b/include/asm-powerpc/cputable.h
@@ -147,6 +147,7 @@ extern void do_feature_fixups(unsigned long value, void *fixup_start,
 #define CPU_FTR_CI_LARGE_PAGE		LONG_ASM_CONST(0x0000100000000000)
 #define CPU_FTR_PAUSE_ZERO		LONG_ASM_CONST(0x0000200000000000)
 #define CPU_FTR_PURR			LONG_ASM_CONST(0x0000400000000000)
+#define CPU_FTR_CELL_TB_BUG		LONG_ASM_CONST(0x0000800000000000)
 
 #ifndef __ASSEMBLY__
 
@@ -335,7 +336,7 @@ extern void do_feature_fixups(unsigned long value, void *fixup_start,
 #define CPU_FTRS_CELL	(CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | \
 	    CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
 	    CPU_FTR_ALTIVEC_COMP | CPU_FTR_MMCRA | CPU_FTR_SMT | \
-	    CPU_FTR_PAUSE_ZERO | CPU_FTR_CI_LARGE_PAGE)
+	    CPU_FTR_PAUSE_ZERO | CPU_FTR_CI_LARGE_PAGE | CPU_FTR_CELL_TB_BUG)
 #define CPU_FTRS_PA6T (CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | \
 	    CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | \
 	    CPU_FTR_ALTIVEC_COMP | CPU_FTR_CI_LARGE_PAGE | \
-- cgit
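A minimal C sketch of the retry idea described above, shown here as an
unconditional loop for clarity; in the kernel the retry sequence would
sit inside a CPU_FTR_CELL_TB_BUG feature section so the fixup mechanism
nops it out on unaffected CPUs. The function name mftb_workaround is an
assumption for illustration:

/* Sketch: re-read the 64-bit timebase while its low 32 bits read
 * as zero, so the erratum window (TBL wrapped to 0, TBU not yet
 * incremented) can never be observed. The only cost is skipping
 * the single tick value whose TBL happens to be exactly 0.
 */
static inline unsigned long mftb_workaround(void)
{
	unsigned long tb;

	asm volatile("1:	mftb	%0\n"
		     "	cmpwi	%0,0\n"
		     "	beq-	1b"
		     : "=r" (tb) : : "cr0");
	return tb;
}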