Diffstat (limited to 'arch/powerpc/include/asm/ppc_asm.h')
-rw-r--r-- arch/powerpc/include/asm/ppc_asm.h | 301
1 file changed, 185 insertions(+), 116 deletions(-)
diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h
index 6b03dff61a05..1d1018c1e482 100644
--- a/arch/powerpc/include/asm/ppc_asm.h
+++ b/arch/powerpc/include/asm/ppc_asm.h
@@ -10,89 +10,88 @@
#include <asm/ppc-opcode.h>
#include <asm/firmware.h>
#include <asm/feature-fixups.h>
+#include <asm/extable.h>
#ifdef __ASSEMBLY__
#define SZL (BITS_PER_LONG/8)
/*
- * Stuff for accurate CPU time accounting.
- * These macros handle transitions between user and system state
- * in exception entry and exit and accumulate time to the
- * user_time and system_time fields in the paca.
+ * This expands to a sequence of operations with reg incrementing from
+ * start to end inclusive, of this form:
+ *
+ * op reg, (offset + (width * reg))(base)
+ *
+ * Note that offset is not the offset of the first operation unless start
+ * is zero (or width is zero).
*/
+.macro OP_REGS op, width, start, end, base, offset
+ .Lreg=\start
+ .rept (\end - \start + 1)
+ \op .Lreg, \offset + \width * .Lreg(\base)
+ .Lreg=.Lreg+1
+ .endr
+.endm
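For illustration only (not part of the patch): on 64-bit, the REST_GPRS(30, 31, r1) wrapper defined further down maps onto OP_REGS ld, 8, 30, 31, r1, GPR0, which assembles to the equivalent of:

	ld	r30, GPR0 + 8*30(r1)
	ld	r31, GPR0 + 8*31(r1)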
-#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
-#define ACCOUNT_CPU_USER_ENTRY(ptr, ra, rb)
-#define ACCOUNT_CPU_USER_EXIT(ptr, ra, rb)
-#define ACCOUNT_STOLEN_TIME
-#else
-#define ACCOUNT_CPU_USER_ENTRY(ptr, ra, rb) \
- MFTB(ra); /* get timebase */ \
- PPC_LL rb, ACCOUNT_STARTTIME_USER(ptr); \
- PPC_STL ra, ACCOUNT_STARTTIME(ptr); \
- subf rb,rb,ra; /* subtract start value */ \
- PPC_LL ra, ACCOUNT_USER_TIME(ptr); \
- add ra,ra,rb; /* add on to user time */ \
- PPC_STL ra, ACCOUNT_USER_TIME(ptr); \
-
-#define ACCOUNT_CPU_USER_EXIT(ptr, ra, rb) \
- MFTB(ra); /* get timebase */ \
- PPC_LL rb, ACCOUNT_STARTTIME(ptr); \
- PPC_STL ra, ACCOUNT_STARTTIME_USER(ptr); \
- subf rb,rb,ra; /* subtract start value */ \
- PPC_LL ra, ACCOUNT_SYSTEM_TIME(ptr); \
- add ra,ra,rb; /* add on to system time */ \
- PPC_STL ra, ACCOUNT_SYSTEM_TIME(ptr)
-
-#ifdef CONFIG_PPC_SPLPAR
-#define ACCOUNT_STOLEN_TIME \
-BEGIN_FW_FTR_SECTION; \
- beq 33f; \
- /* from user - see if there are any DTL entries to process */ \
- ld r10,PACALPPACAPTR(r13); /* get ptr to VPA */ \
- ld r11,PACA_DTL_RIDX(r13); /* get log read index */ \
- addi r10,r10,LPPACA_DTLIDX; \
- LDX_BE r10,0,r10; /* get log write index */ \
- cmpd cr1,r11,r10; \
- beq+ cr1,33f; \
- bl accumulate_stolen_time; \
- ld r12,_MSR(r1); \
- andi. r10,r12,MSR_PR; /* Restore cr0 (coming from user) */ \
-33: \
-END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR)
-
-#else /* CONFIG_PPC_SPLPAR */
-#define ACCOUNT_STOLEN_TIME
-
-#endif /* CONFIG_PPC_SPLPAR */
-
-#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
+/*
+ * This expands to a sequence of register clears for regs start to end
+ * inclusive, of the form:
+ *
+ * li rN, 0
+ */
+.macro ZEROIZE_REGS start, end
+ .Lreg=\start
+ .rept (\end - \start + 1)
+ li .Lreg, 0
+ .Lreg=.Lreg+1
+ .endr
+.endm
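Likewise for illustration (not part of the patch), ZEROIZE_REGS 14, 16 emits the equivalent of:

	li	r14, 0
	li	r15, 0
	li	r16, 0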
/*
* Macros for storing registers into and loading registers from
* exception frames.
*/
#ifdef __powerpc64__
-#define SAVE_GPR(n, base) std n,GPR0+8*(n)(base)
-#define REST_GPR(n, base) ld n,GPR0+8*(n)(base)
-#define SAVE_NVGPRS(base) SAVE_8GPRS(14, base); SAVE_10GPRS(22, base)
-#define REST_NVGPRS(base) REST_8GPRS(14, base); REST_10GPRS(22, base)
+#define SAVE_GPRS(start, end, base) OP_REGS std, 8, start, end, base, GPR0
+#define REST_GPRS(start, end, base) OP_REGS ld, 8, start, end, base, GPR0
+#define SAVE_NVGPRS(base) SAVE_GPRS(14, 31, base)
+#define REST_NVGPRS(base) REST_GPRS(14, 31, base)
#else
-#define SAVE_GPR(n, base) stw n,GPR0+4*(n)(base)
-#define REST_GPR(n, base) lwz n,GPR0+4*(n)(base)
-#define SAVE_NVGPRS(base) stmw 13, GPR0+4*13(base)
-#define REST_NVGPRS(base) lmw 13, GPR0+4*13(base)
+#define SAVE_GPRS(start, end, base) OP_REGS stw, 4, start, end, base, GPR0
+#define REST_GPRS(start, end, base) OP_REGS lwz, 4, start, end, base, GPR0
+#define SAVE_NVGPRS(base) SAVE_GPRS(13, 31, base)
+#define REST_NVGPRS(base) REST_GPRS(13, 31, base)
#endif
-#define SAVE_2GPRS(n, base) SAVE_GPR(n, base); SAVE_GPR(n+1, base)
-#define SAVE_4GPRS(n, base) SAVE_2GPRS(n, base); SAVE_2GPRS(n+2, base)
-#define SAVE_8GPRS(n, base) SAVE_4GPRS(n, base); SAVE_4GPRS(n+4, base)
-#define SAVE_10GPRS(n, base) SAVE_8GPRS(n, base); SAVE_2GPRS(n+8, base)
-#define REST_2GPRS(n, base) REST_GPR(n, base); REST_GPR(n+1, base)
-#define REST_4GPRS(n, base) REST_2GPRS(n, base); REST_2GPRS(n+2, base)
-#define REST_8GPRS(n, base) REST_4GPRS(n, base); REST_4GPRS(n+4, base)
-#define REST_10GPRS(n, base) REST_8GPRS(n, base); REST_2GPRS(n+8, base)
+#define ZEROIZE_GPRS(start, end) ZEROIZE_REGS start, end
+#ifdef __powerpc64__
+#define ZEROIZE_NVGPRS() ZEROIZE_GPRS(14, 31)
+#else
+#define ZEROIZE_NVGPRS() ZEROIZE_GPRS(13, 31)
+#endif
+#define ZEROIZE_GPR(n) ZEROIZE_GPRS(n, n)
+
+#define SAVE_GPR(n, base) SAVE_GPRS(n, n, base)
+#define REST_GPR(n, base) REST_GPRS(n, n, base)
+
+/* macros for handling user register sanitisation */
+#ifdef CONFIG_INTERRUPT_SANITIZE_REGISTERS
+#define SANITIZE_SYSCALL_GPRS() ZEROIZE_GPR(0); \
+ ZEROIZE_GPRS(5, 12); \
+ ZEROIZE_NVGPRS()
+#define SANITIZE_GPR(n) ZEROIZE_GPR(n)
+#define SANITIZE_GPRS(start, end) ZEROIZE_GPRS(start, end)
+#define SANITIZE_NVGPRS() ZEROIZE_NVGPRS()
+#define SANITIZE_RESTORE_NVGPRS() REST_NVGPRS(r1)
+#define HANDLER_RESTORE_NVGPRS()
+#else
+#define SANITIZE_SYSCALL_GPRS()
+#define SANITIZE_GPR(n)
+#define SANITIZE_GPRS(start, end)
+#define SANITIZE_NVGPRS()
+#define SANITIZE_RESTORE_NVGPRS()
+#define HANDLER_RESTORE_NVGPRS() REST_NVGPRS(r1)
+#endif /* CONFIG_INTERRUPT_SANITIZE_REGISTERS */
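Sketch only (hypothetical placement, not defined by this patch): a syscall path could clear the caller-visible registers on entry and reload the non-volatile GPRs on exit through the SANITIZE_ wrappers, all of which compile away when the config option is off:

	SANITIZE_SYSCALL_GPRS()		/* entry: clear r0, r5-r12 and the NVGPRs */
	/* ... syscall body ... */
	SANITIZE_RESTORE_NVGPRS()	/* exit: reload NVGPRs from the exception frame */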
#define SAVE_FPR(n, base) stfd n,8*TS_FPRWIDTH*(n)(base)
#define SAVE_2FPRS(n, base) SAVE_FPR(n, base); SAVE_FPR(n+1, base)
@@ -180,22 +179,38 @@ END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR)
#define VCPU_GPR(n) __VCPU_GPR(__REG_##n)
#ifdef __KERNEL__
-#ifdef CONFIG_PPC64
+
+/*
+ * Used to name C functions called from asm
+ */
+#ifdef CONFIG_PPC_KERNEL_PCREL
+#define CFUNC(name) name@notoc
+#else
+#define CFUNC(name) name
+#endif
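For example (hypothetical callee name, not from this patch), an asm caller writes:

	bl	CFUNC(some_c_helper)

which becomes bl some_c_helper@notoc under CONFIG_PPC_KERNEL_PCREL and a plain bl some_c_helper otherwise.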
+
+/*
+ * We use __powerpc64__ here because we want the compat VDSO to use the 32-bit
+ * version below in the else case of the ifdef.
+ */
+#ifdef __powerpc64__
#define STACKFRAMESIZE 256
#define __STK_REG(i) (112 + ((i)-14)*8)
#define STK_REG(i) __STK_REG(__REG_##i)
-#ifdef PPC64_ELF_ABI_v2
+#ifdef CONFIG_PPC64_ELF_ABI_V2
#define STK_GOT 24
-#define __STK_PARAM(i) (32 + ((i)-3)*8)
+#define STK_PARAM_AREA 32
#else
#define STK_GOT 40
-#define __STK_PARAM(i) (48 + ((i)-3)*8)
+#define STK_PARAM_AREA 48
#endif
+
+#define __STK_PARAM(i) (STK_PARAM_AREA + ((i)-3)*8)
#define STK_PARAM(i) __STK_PARAM(__REG_##i)
-#ifdef PPC64_ELF_ABI_v2
+#ifdef CONFIG_PPC64_ELF_ABI_V2
#define _GLOBAL(name) \
.align 2 ; \
@@ -203,6 +218,9 @@ END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR)
.globl name; \
name:
+#ifdef CONFIG_PPC_KERNEL_PCREL
+#define _GLOBAL_TOC _GLOBAL
+#else
#define _GLOBAL_TOC(name) \
.align 2 ; \
.type name,@function; \
@@ -211,6 +229,7 @@ name: \
0: addis r2,r12,(.TOC.-0b)@ha; \
addi r2,r2,(.TOC.-0b)@l; \
.localentry name,.-name
+#endif
#define DOTSYM(a) a
@@ -240,17 +259,14 @@ GLUE(.,name):
#else /* 32-bit */
-#define _ENTRY(n) \
- .globl n; \
-n:
-
#define _GLOBAL(n) \
- .stabs __stringify(n:F-1),N_FUN,0,0,n;\
.globl n; \
n:
#define _GLOBAL_TOC(name) _GLOBAL(name)
+#define DOTSYM(a) a
+
#endif
/*
@@ -306,7 +322,7 @@ n:
/* Be careful, this will clobber the lr register. */
#define LOAD_REG_ADDR_PIC(reg, name) \
- bl 0f; \
+ bcl 20,31,$+4; \
0: mflr reg; \
addis reg,reg,(name - 0b)@ha; \
addi reg,reg,(name - 0b)@l;
@@ -345,6 +361,17 @@ n:
#ifdef __powerpc64__
+#ifdef CONFIG_PPC_KERNEL_PCREL
+#define __LOAD_PACA_TOC(reg) \
+ li reg,-1
+#else
+#define __LOAD_PACA_TOC(reg) \
+ ld reg,PACATOC(r13)
+#endif
+
+#define LOAD_PACA_TOC() \
+ __LOAD_PACA_TOC(r2)
+
#define LOAD_REG_IMMEDIATE(reg, expr) __LOAD_REG_IMMEDIATE reg, expr
#define LOAD_REG_IMMEDIATE_SYM(reg, tmp, expr) \
@@ -354,8 +381,26 @@ n:
ori reg, reg, (expr)@l; \
rldimi reg, tmp, 32, 0
+#ifdef CONFIG_PPC_KERNEL_PCREL
+#define LOAD_REG_ADDR(reg,name) \
+ pla reg,name@pcrel
+
+#else
#define LOAD_REG_ADDR(reg,name) \
- ld reg,name@got(r2)
+ addis reg,r2,name@toc@ha; \
+ addi reg,reg,name@toc@l
+#endif
+
+#ifdef CONFIG_PPC_BOOK3E_64
+/*
+ * This is used in register-constrained interrupt handlers. Not to be used
+ * by BOOK3S. ld complains with "got/toc optimization is not supported" if r2
+ * is not used for the TOC offset, so use @got(tocreg). If the interrupt
+ * handlers saved r2 instead, LOAD_REG_ADDR could be used.
+ */
+#define LOAD_REG_ADDR_ALTTOC(reg,tocreg,name) \
+ ld reg,name@got(tocreg)
+#endif
#define LOAD_REG_ADDRBASE(reg,name) LOAD_REG_ADDR(reg,name)
#define ADDROFF(name) 0
@@ -363,6 +408,15 @@ n:
/* offsets for stack frame layout */
#define LRSAVE 16
+/*
+ * GCC stack frames follow a different pattern on 32 vs 64. This can be used
+ * to make asm frames be consistent with C.
+ */
+#define PPC_CREATE_STACK_FRAME(size) \
+ mflr r0; \
+ std r0,16(r1); \
+ stdu r1,-(size)(r1)
+
#else /* 32-bit */
#define LOAD_REG_IMMEDIATE(reg, expr) __LOAD_REG_IMMEDIATE_32 reg, expr
@@ -379,20 +433,15 @@ n:
/* offsets for stack frame layout */
#define LRSAVE 4
-#endif
+#define PPC_CREATE_STACK_FRAME(size) \
+ stwu r1,-(size)(r1); \
+ mflr r0; \
+ stw r0,(size+4)(r1)
-/* various errata or part fixups */
-#ifdef CONFIG_PPC601_SYNC_FIX
-#define SYNC sync; isync
-#define SYNC_601 sync
-#define ISYNC_601 isync
-#else
-#define SYNC
-#define SYNC_601
-#define ISYNC_601
#endif
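A minimal sketch of how the 64-bit variant might be used (hypothetical routine and callee, assuming STACKFRAMESIZE from above; not part of the patch):

	_GLOBAL(example_helper)
	PPC_CREATE_STACK_FRAME(STACKFRAMESIZE)	/* save LR, build a C-style frame */
	bl	CFUNC(some_c_helper)		/* hypothetical C callee */
	addi	r1,r1,STACKFRAMESIZE		/* tear the frame down */
	ld	r0,LRSAVE(r1)			/* reload the saved LR */
	mtlr	r0
	blr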
-#if defined(CONFIG_PPC_CELL) || defined(CONFIG_PPC_FSL_BOOK3E)
+/* various errata or part fixups */
+#if defined(CONFIG_PPC_CELL) || defined(CONFIG_PPC_E500)
#define MFTB(dest) \
90: mfspr dest, SPRN_TBRL; \
BEGIN_FTR_SECTION_NESTED(96); \
@@ -411,8 +460,7 @@ END_FTR_SECTION_NESTED(CPU_FTR_CELL_TB_BUG, CPU_FTR_CELL_TB_BUG, 96)
#define MFTBU(dest) mfspr dest, SPRN_TBRU
#endif
-/* tlbsync is not implemented on 601 */
-#if !defined(CONFIG_SMP) || defined(CONFIG_PPC_BOOK3S_601)
+#ifndef CONFIG_SMP
#define TLBSYNC
#else
#define TLBSYNC tlbsync; sync
@@ -460,7 +508,25 @@ END_FTR_SECTION_NESTED(CPU_FTR_CELL_TB_BUG, CPU_FTR_CELL_TB_BUG, 96)
*/
#define DCBT_BOOK3S_STOP_ALL_STREAM_IDS(scratch) \
lis scratch,0x60000000@h; \
- dcbt 0,scratch,0b01010
+ .machine push; \
+ .machine power4; \
+ dcbt 0,scratch,0b01010; \
+ .machine pop;
+
+#define DCBT_SETUP_STREAMS(from, from_parms, to, to_parms, scratch) \
+ lis scratch,0x8000; /* GO=1 */ \
+ clrldi scratch,scratch,32; \
+ .machine push; \
+ .machine power4; \
+ /* setup read stream 0 */ \
+ dcbt 0,from,0b01000; /* addr from */ \
+ dcbt 0,from_parms,0b01010; /* length and depth from */ \
+ /* setup write stream 1 */ \
+ dcbtst 0,to,0b01000; /* addr to */ \
+ dcbtst 0,to_parms,0b01010; /* length and depth to */ \
+ eieio; \
+ dcbt 0,scratch,0b01010; /* all streams GO */ \
+ .machine pop;
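Sketch only (hypothetical register assignments; the parameter encodings are assumptions, not spelled out by this patch): a copy routine primes the streams once before its main loop:

	/* r4 = source, r3 = destination, r7/r8 = length/depth words, r10 = scratch */
	DCBT_SETUP_STREAMS(r4, r7, r3, r8, r10)
	/* ... copy loop follows, with both streams prefetching ahead ... */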
/*
* toreal/fromreal/tophys/tovirt macros. 32-bit BookE makes them
@@ -506,15 +572,9 @@ END_FTR_SECTION_NESTED(CPU_FTR_CELL_TB_BUG, CPU_FTR_CELL_TB_BUG, 96)
#endif
#ifdef CONFIG_PPC_BOOK3S_64
-#define RFI rfid
#define MTMSRD(r) mtmsrd r
#define MTMSR_EERI(reg) mtmsrd reg,1
#else
-#ifndef CONFIG_40x
-#define RFI rfi
-#else
-#define RFI rfi; b . /* Prevent prefetch past rfi */
-#endif
#define MTMSRD(r) mtmsr r
#define MTMSR_EERI(reg) mtmsr reg
#endif
@@ -749,11 +809,7 @@ END_FTR_SECTION_NESTED(CPU_FTR_CELL_TB_BUG, CPU_FTR_CELL_TB_BUG, 96)
#define evr30 30
#define evr31 31
-/* some stab codes */
-#define N_FUN 36
-#define N_RSYM 64
-#define N_SLINE 68
-#define N_SO 100
+#define RFSCV .long 0x4c0000a4
/*
* Create an endian fixup trampoline
@@ -770,11 +826,11 @@ END_FTR_SECTION_NESTED(CPU_FTR_CELL_TB_BUG, CPU_FTR_CELL_TB_BUG, 96)
* kernel is built for.
*/
-#ifdef CONFIG_PPC_BOOK3E
+#ifdef CONFIG_PPC_BOOK3E_64
#define FIXUP_ENDIAN
#else
/*
- * This version may be used in in HV or non-HV context.
+ * This version may be used in HV or non-HV context.
* MSR[EE] must be disabled.
*/
#define FIXUP_ENDIAN \
@@ -810,21 +866,26 @@ END_FTR_SECTION_NESTED(CPU_FTR_CELL_TB_BUG, CPU_FTR_CELL_TB_BUG, 96)
.long 0x2402004c; /* hrfid */ \
191:
-#endif /* !CONFIG_PPC_BOOK3E */
+#endif /* !CONFIG_PPC_BOOK3E_64 */
#endif /* __ASSEMBLY__ */
-/*
- * Helper macro for exception table entries
- */
-#define EX_TABLE(_fault, _target) \
- stringify_in_c(.section __ex_table,"a";)\
- stringify_in_c(.balign 4;) \
- stringify_in_c(.long (_fault) - . ;) \
- stringify_in_c(.long (_target) - . ;) \
+#define SOFT_MASK_TABLE(_start, _end) \
+ stringify_in_c(.section __soft_mask_table,"a";)\
+ stringify_in_c(.balign 8;) \
+ stringify_in_c(.llong (_start);) \
+ stringify_in_c(.llong (_end);) \
stringify_in_c(.previous)
-#ifdef CONFIG_PPC_FSL_BOOK3E
+#define RESTART_TABLE(_start, _end, _target) \
+ stringify_in_c(.section __restart_table,"a";)\
+ stringify_in_c(.balign 8;) \
+ stringify_in_c(.llong (_start);) \
+ stringify_in_c(.llong (_end);) \
+ stringify_in_c(.llong (_target);) \
+ stringify_in_c(.previous)
+
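Sketch with hypothetical labels (the consumers of these tables live elsewhere in the kernel): each RESTART_TABLE entry records an address range and the label execution should resume from if that range is interrupted:

	.Lrst_start:
	/* ... code that must be re-entered from .Lrst_restart if interrupted ... */
	.Lrst_end:
	RESTART_TABLE(.Lrst_start, .Lrst_end, .Lrst_restart)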
+#ifdef CONFIG_PPC_E500
#define BTB_FLUSH(reg) \
lis reg,BUCSR_INIT@h; \
ori reg,reg,BUCSR_INIT@l; \
@@ -832,6 +893,14 @@ END_FTR_SECTION_NESTED(CPU_FTR_CELL_TB_BUG, CPU_FTR_CELL_TB_BUG, 96)
isync;
#else
#define BTB_FLUSH(reg)
-#endif /* CONFIG_PPC_FSL_BOOK3E */
+#endif /* CONFIG_PPC_E500 */
+
+#if defined(CONFIG_PPC64_ELF_ABI_V1)
+#define STACK_FRAME_PARAMS 48
+#elif defined(CONFIG_PPC64_ELF_ABI_V2)
+#define STACK_FRAME_PARAMS 32
+#elif defined(CONFIG_PPC32)
+#define STACK_FRAME_PARAMS 8
+#endif
#endif /* _ASM_POWERPC_PPC_ASM_H */