From 7a24ae2e172f770df07f8e48ed3ed2f3a6b17e37 Mon Sep 17 00:00:00 2001
From: Christophe Leroy
Date: Sat, 30 Jan 2021 23:08:13 +1000
Subject: powerpc/32s: move DABR match out of handle_page_fault

handle_page_fault() has some code dedicated to book3s/32 to call
do_break() when the DSI is a DABR match.

On other platforms, do_break() is handled separately.

Do the same for book3s/32, doing it earlier in the processing of the DSI.

This change also avoids doing the test on ISI.

Signed-off-by: Christophe Leroy
Signed-off-by: Michael Ellerman
Link: https://lore.kernel.org/r/20210130130852.2952424-4-npiggin@gmail.com
---
 arch/powerpc/kernel/entry_32.S | 15 ---------------
 1 file changed, 15 deletions(-)

(limited to 'arch/powerpc/kernel/entry_32.S')

diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
index 1c9b0ccc2172..238eacfda7b0 100644
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
@@ -670,10 +670,6 @@ ppc_swapcontext:
 	.globl	handle_page_fault
 handle_page_fault:
 	addi	r3,r1,STACK_FRAME_OVERHEAD
-#ifdef CONFIG_PPC_BOOK3S_32
-	andis.	r0,r5,DSISR_DABRMATCH@h
-	bne-	handle_dabr_fault
-#endif
 	bl	do_page_fault
 	cmpwi	r3,0
 	beq+	ret_from_except
@@ -687,17 +683,6 @@ handle_page_fault:
 	bl	__bad_page_fault
 	b	ret_from_except_full

-#ifdef CONFIG_PPC_BOOK3S_32
-	/* We have a data breakpoint exception - handle it */
-handle_dabr_fault:
-	SAVE_NVGPRS(r1)
-	lwz	r0,_TRAP(r1)
-	clrrwi	r0,r0,1
-	stw	r0,_TRAP(r1)
-	bl	do_break
-	b	ret_from_except_full
-#endif
-
 /*
  * This routine switches between two different tasks.  The process
  * state of one is saved on its kernel stack.  Then the state
--
cgit

From 73d7a97914f23397b012e851f6a1fe4061923a82 Mon Sep 17 00:00:00 2001
From: Nicholas Piggin
Date: Sat, 30 Jan 2021 23:08:20 +1000
Subject: powerpc/32: transfer can avoid saving r4/r5 over trace call

Now that handlers get all registers from pt_regs, r4 and r5 are no
longer live here and may be clobbered.

Signed-off-by: Nicholas Piggin
Signed-off-by: Michael Ellerman
Link: https://lore.kernel.org/r/20210130130852.2952424-11-npiggin@gmail.com
---
 arch/powerpc/kernel/entry_32.S | 7 +------
 1 file changed, 1 insertion(+), 6 deletions(-)

(limited to 'arch/powerpc/kernel/entry_32.S')

diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
index 238eacfda7b0..d6ea3f2d6cc0 100644
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
@@ -276,8 +276,7 @@ reenable_mmu:
 	 * We save a bunch of GPRs,
 	 * r3 can be different from GPR3(r1) at this point, r9 and r11
 	 * contains the old MSR and handler address respectively,
-	 * r4 & r5 can contain page fault arguments that need to be passed
-	 * along as well. r0, r6-r8, r12, CCR, CTR, XER etc... are left
+	 * r0, r4-r8, r12, CCR, CTR, XER etc... are left
 	 * clobbered as they aren't useful past this point.
 	 */

@@ -285,15 +284,11 @@ reenable_mmu:
 	stw	r9,8(r1)
 	stw	r11,12(r1)
 	stw	r3,16(r1)
-	stw	r4,20(r1)
-	stw	r5,24(r1)

 	/* If we are disabling interrupts (normal case), simply log it with
 	 * lockdep */
1:	bl	trace_hardirqs_off
-	lwz	r5,24(r1)
-	lwz	r4,20(r1)
 	lwz	r3,16(r1)
 	lwz	r11,12(r1)
 	lwz	r9,8(r1)
--
cgit
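
The reason r4/r5 can be clobbered is the pt_regs-based handler convention the surrounding series introduces: the fault address and status are re-read from the saved frame rather than carried live in registers. A minimal C sketch of that shape, assuming illustrative pt_regs fields and a hypothetical __do_page_fault() helper (not the kernel's exact code):

    /*
     * Sketch: a fault handler that takes everything from pt_regs. The
     * fault address (DAR) and status (DSISR) are read from the saved
     * frame instead of being kept live in r4/r5 across helper calls.
     * The pt_regs fields and __do_page_fault() are stand-ins.
     */
    struct pt_regs {
        unsigned long dar;    /* faulting data address */
        unsigned long dsisr;  /* data storage fault status */
    };

    static long __do_page_fault(struct pt_regs *regs,
                                unsigned long addr, unsigned long dsisr)
    {
        (void)regs; (void)addr; (void)dsisr;
        return 0;   /* 0 = handled, matching the asm's "cmpwi r3,0" */
    }

    long do_page_fault(struct pt_regs *regs)
    {
        return __do_page_fault(regs, regs->dar, regs->dsisr);
    }
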
From 8458c628a53ba4311b2df12370be1a6f1870ff37 Mon Sep 17 00:00:00 2001
From: Nicholas Piggin
Date: Sat, 30 Jan 2021 23:08:21 +1000
Subject: powerpc: bad_page_fault get registers from regs

Similar to the previous patch, this makes interrupt handler function
types more regular so they can be wrapped with the next patch.

Signed-off-by: Nicholas Piggin
Signed-off-by: Michael Ellerman
Link: https://lore.kernel.org/r/20210130130852.2952424-12-npiggin@gmail.com
---
 arch/powerpc/kernel/entry_32.S | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

(limited to 'arch/powerpc/kernel/entry_32.S')

diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
index d6ea3f2d6cc0..b102b40c4988 100644
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
@@ -672,9 +672,8 @@ handle_page_fault:
 	lwz	r0,_TRAP(r1)
 	clrrwi	r0,r0,1
 	stw	r0,_TRAP(r1)
-	mr	r5,r3
+	mr	r4,r3		/* err arg for bad_page_fault */
 	addi	r3,r1,STACK_FRAME_OVERHEAD
-	lwz	r4,_DAR(r1)
 	bl	__bad_page_fault
 	b	ret_from_except_full

--
cgit
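
With the address taken from regs, the C side needs only (regs, sig) — which is why the separate "lwz r4,_DAR(r1)" argument load above can be dropped. A hedged sketch of the resulting handler type, with stand-in types and an illustrative body:

    /*
     * Sketch: the regularized bad-page-fault handler takes (regs, sig)
     * only; the faulting address comes from regs->dar. Stand-in types;
     * the body is illustrative, not the kernel's.
     */
    #include <stdio.h>

    struct pt_regs { unsigned long dar; unsigned long nip; };

    void __bad_page_fault(struct pt_regs *regs, int sig)
    {
        printf("Unhandled access at 0x%lx, ip 0x%lx, signal %d\n",
               regs->dar, regs->nip, sig);
    }
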
From 57fdfbce89137ae85cd5cef48be168040a47dd13 Mon Sep 17 00:00:00 2001
From: Christophe Leroy
Date: Mon, 8 Feb 2021 15:10:20 +0000
Subject: powerpc/32s: Add missing call to kuep_lock on syscall entry

Userspace execution protection and fast syscall entry were implemented
independently of each other and were both merged in kernel 5.2, leaving
syscall entry without userspace execution protection.

On syscall entry, execution of user space memory must be locked in the
same way as on exception entry.

Fixes: b86fb88855ea ("powerpc/32: implement fast entry for syscalls on non BOOKE")
Cc: stable@vger.kernel.org
Signed-off-by: Christophe Leroy
Signed-off-by: Michael Ellerman
Link: https://lore.kernel.org/r/c65e105b63aaf74f91a14f845bc77192350b84a6.1612796617.git.christophe.leroy@csgroup.eu
---
 arch/powerpc/kernel/entry_32.S | 3 +++
 1 file changed, 3 insertions(+)

(limited to 'arch/powerpc/kernel/entry_32.S')

diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
index b102b40c4988..b1e36602c013 100644
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
@@ -351,6 +351,9 @@ trace_syscall_entry_irq_off:

 	.globl	transfer_to_syscall
 transfer_to_syscall:
+#ifdef CONFIG_PPC_BOOK3S_32
+	kuep_lock r11, r12
+#endif
 #ifdef CONFIG_TRACE_IRQFLAGS
 	andi.	r12,r9,MSR_EE
 	beq-	trace_syscall_entry_irq_off
--
cgit

From fbcee2ebe8edbb6a93316f0a189ae7fcfaa7094f Mon Sep 17 00:00:00 2001
From: Christophe Leroy
Date: Mon, 8 Feb 2021 15:10:32 +0000
Subject: powerpc/32: Always save non volatile GPRs at syscall entry

In preparation for porting syscall entry/exit to C, unconditionally
save non volatile general purpose registers.

Commit 965dd3ad3076 ("powerpc/64/syscall: Remove non-volatile GPR save
optimisation") provides a detailed explanation.

This increases the number of cycles by 24 on 8xx with the null_syscall
benchmark (280 => 304 cycles).

Signed-off-by: Christophe Leroy
Signed-off-by: Michael Ellerman
Link: https://lore.kernel.org/r/21c08162b83655195fe9ead78ff2cfd28508d023.1612796617.git.christophe.leroy@csgroup.eu
---
 arch/powerpc/kernel/entry_32.S | 46 +-----------------------------------------
 1 file changed, 1 insertion(+), 45 deletions(-)

(limited to 'arch/powerpc/kernel/entry_32.S')

diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
index b1e36602c013..97dc28a68465 100644
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
@@ -351,6 +351,7 @@ trace_syscall_entry_irq_off:

 	.globl	transfer_to_syscall
 transfer_to_syscall:
+	SAVE_NVGPRS(r1)
 #ifdef CONFIG_PPC_BOOK3S_32
 	kuep_lock r11, r12
 #endif
@@ -614,51 +615,6 @@ ret_from_kernel_syscall:
 #endif
 _ASM_NOKPROBE_SYMBOL(ret_from_kernel_syscall)

-/*
- * The fork/clone functions need to copy the full register set into
- * the child process. Therefore we need to save all the nonvolatile
- * registers (r13 - r31) before calling the C code.
- */
-	.globl	ppc_fork
-ppc_fork:
-	SAVE_NVGPRS(r1)
-	lwz	r0,_TRAP(r1)
-	rlwinm	r0,r0,0,0,30	/* clear LSB to indicate full */
-	stw	r0,_TRAP(r1)	/* register set saved */
-	b	sys_fork
-
-	.globl	ppc_vfork
-ppc_vfork:
-	SAVE_NVGPRS(r1)
-	lwz	r0,_TRAP(r1)
-	rlwinm	r0,r0,0,0,30	/* clear LSB to indicate full */
-	stw	r0,_TRAP(r1)	/* register set saved */
-	b	sys_vfork
-
-	.globl	ppc_clone
-ppc_clone:
-	SAVE_NVGPRS(r1)
-	lwz	r0,_TRAP(r1)
-	rlwinm	r0,r0,0,0,30	/* clear LSB to indicate full */
-	stw	r0,_TRAP(r1)	/* register set saved */
-	b	sys_clone
-
-	.globl	ppc_clone3
-ppc_clone3:
-	SAVE_NVGPRS(r1)
-	lwz	r0,_TRAP(r1)
-	rlwinm	r0,r0,0,0,30	/* clear LSB to indicate full */
-	stw	r0,_TRAP(r1)	/* register set saved */
-	b	sys_clone3
-
-	.globl	ppc_swapcontext
-ppc_swapcontext:
-	SAVE_NVGPRS(r1)
-	lwz	r0,_TRAP(r1)
-	rlwinm	r0,r0,0,0,30	/* clear LSB to indicate full */
-	stw	r0,_TRAP(r1)	/* register set saved */
-	b	sys_swapcontext
-
 /*
  * Top-level page fault handling.
  * This is in assembler because if do_page_fault tells us that
--
cgit
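
The stubs removed above all implement one convention: the low bit of the saved trap word records whether the non-volatile GPRs were saved into the frame. Once NVGPRS are saved on every syscall entry, every frame is "full" and the stubs become pointless. A hedged C sketch of that convention (compare the kernel's FULL_REGS() test; stand-in types, not the exact macros):

    /*
     * Sketch of the "full register set" convention: the LSB of the
     * saved trap word is clear when the non-volatile GPRs are valid in
     * pt_regs — this is what "rlwinm r0,r0,0,0,30" clears above.
     * Illustrative, not the kernel's exact definition.
     */
    #include <stdbool.h>

    struct pt_regs { unsigned long trap; /* GPRs etc. elided */ };

    static bool full_regs(const struct pt_regs *regs)
    {
        return (regs->trap & 1) == 0;   /* LSB clear => NVGPRs saved */
    }

    static void mark_full_regs(struct pt_regs *regs)
    {
        regs->trap &= ~1UL;             /* register set saved */
    }
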
From 6f76a01173ccaa363739f913394d4e138d92d718 Mon Sep 17 00:00:00 2001
From: Christophe Leroy
Date: Mon, 8 Feb 2021 15:10:33 +0000
Subject: powerpc/syscall: implement system call entry/exit logic in C for PPC32

This is a port of the PPC64 syscall entry/exit logic in C to PPC32.

Performance-wise on 8xx:
Before: 304 cycles on null_syscall
After:  348 cycles on null_syscall

Signed-off-by: Christophe Leroy
Signed-off-by: Michael Ellerman
Link: https://lore.kernel.org/r/a93b08e1275e9d1f0b1c39043d1b827586b2b401.1612796617.git.christophe.leroy@csgroup.eu
---
 arch/powerpc/kernel/entry_32.S | 224 ++++++-----------------------------------
 1 file changed, 30 insertions(+), 194 deletions(-)

(limited to 'arch/powerpc/kernel/entry_32.S')

diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
index 97dc28a68465..bbf7ecea6fe0 100644
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
@@ -329,117 +329,23 @@ stack_ovf:
 	_ASM_NOKPROBE_SYMBOL(stack_ovf)
 #endif

-#ifdef CONFIG_TRACE_IRQFLAGS
-trace_syscall_entry_irq_off:
-	/*
-	 * Syscall shouldn't happen while interrupts are disabled,
-	 * so let's do a warning here.
-	 */
-0:	trap
-	EMIT_BUG_ENTRY 0b,__FILE__,__LINE__, BUGFLAG_WARNING
-	bl	trace_hardirqs_on
-
-	/* Now enable for real */
-	LOAD_REG_IMMEDIATE(r10, MSR_KERNEL | MSR_EE)
-	mtmsr	r10
-
-	REST_GPR(0, r1)
-	REST_4GPRS(3, r1)
-	REST_2GPRS(7, r1)
-	b	DoSyscall
-#endif /* CONFIG_TRACE_IRQFLAGS */
-
 	.globl	transfer_to_syscall
 transfer_to_syscall:
 	SAVE_NVGPRS(r1)
 #ifdef CONFIG_PPC_BOOK3S_32
 	kuep_lock r11, r12
 #endif
-#ifdef CONFIG_TRACE_IRQFLAGS
-	andi.	r12,r9,MSR_EE
-	beq-	trace_syscall_entry_irq_off
-#endif /* CONFIG_TRACE_IRQFLAGS */
-
-/*
- * Handle a system call.
- */
-	.stabs	"arch/powerpc/kernel/",N_SO,0,0,0f
-	.stabs	"entry_32.S",N_SO,0,0,0f
-0:
-
-_GLOBAL(DoSyscall)
-	stw	r3,ORIG_GPR3(r1)
-	li	r12,0
-	stw	r12,RESULT(r1)
-#ifdef CONFIG_TRACE_IRQFLAGS
-	/* Make sure interrupts are enabled */
-	mfmsr	r11
-	andi.	r12,r11,MSR_EE
-	/* We came in with interrupts disabled, we WARN and mark them enabled
-	 * for lockdep now */
-0:	tweqi	r12, 0
-	EMIT_BUG_ENTRY 0b,__FILE__,__LINE__, BUGFLAG_WARNING
-#endif /* CONFIG_TRACE_IRQFLAGS */
-	lwz	r11,TI_FLAGS(r2)
-	andi.	r11,r11,_TIF_SYSCALL_DOTRACE
-	bne-	syscall_dotrace
-syscall_dotrace_cont:
-	cmplwi	0,r0,NR_syscalls
-	lis	r10,sys_call_table@h
-	ori	r10,r10,sys_call_table@l
-	slwi	r0,r0,2
-	bge-	66f
-
-	barrier_nospec_asm
-	/*
-	 * Prevent the load of the handler below (based on the user-passed
-	 * system call number) being speculatively executed until the test
-	 * against NR_syscalls and branch to .66f above has
-	 * committed.
-	 */
+	/* Calling convention has r9 = orig r0, r10 = regs */
+	addi	r10,r1,STACK_FRAME_OVERHEAD
+	mr	r9,r0
+	stw	r10,THREAD+PT_REGS(r2)
+	bl	system_call_exception
+
-	lwzx	r10,r10,r0	/* Fetch system call handler [ptr] */
-	mtlr	r10
-	addi	r9,r1,STACK_FRAME_OVERHEAD
-	PPC440EP_ERR42
-	blrl		/* Call handler */
-
 	.globl	ret_from_syscall
 ret_from_syscall:
-#ifdef CONFIG_DEBUG_RSEQ
-	/* Check whether the syscall is issued inside a restartable sequence */
-	stw	r3,GPR3(r1)
-	addi	r3,r1,STACK_FRAME_OVERHEAD
-	bl	rseq_syscall
-	lwz	r3,GPR3(r1)
-#endif
-	mr	r6,r3
-	/* disable interrupts so current_thread_info()->flags can't change */
-	LOAD_REG_IMMEDIATE(r10,MSR_KERNEL)	/* doesn't include MSR_EE */
-	/* Note: We don't bother telling lockdep about it */
-	mtmsr	r10
-	lwz	r9,TI_FLAGS(r2)
-	li	r8,-MAX_ERRNO
-	andi.	r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)
-	bne-	syscall_exit_work
-	cmplw	0,r3,r8
-	blt+	syscall_exit_cont
-	lwz	r11,_CCR(r1)	/* Load CR */
-	neg	r3,r3
-	oris	r11,r11,0x1000	/* Set SO bit in CR */
-	stw	r11,_CCR(r1)
-syscall_exit_cont:
-	lwz	r8,_MSR(r1)
-#ifdef CONFIG_TRACE_IRQFLAGS
-	/* If we are going to return from the syscall with interrupts
-	 * off, we trace that here. It shouldn't normally happen.
-	 */
-	andi.	r10,r8,MSR_EE
-	bne+	1f
-	stw	r3,GPR3(r1)
-	bl	trace_hardirqs_off
-	lwz	r3,GPR3(r1)
-1:
-#endif /* CONFIG_TRACE_IRQFLAGS */
+	addi	r4,r1,STACK_FRAME_OVERHEAD
+	li	r5,0
+	bl	syscall_exit_prepare
 #if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
 	/* If the process has its own DBCR0 value, load it up.  The internal
 	   debug mode bit tells us that dbcr0 should be loaded. */
@@ -453,12 +359,6 @@ syscall_exit_cont:
 	cmplwi	cr0,r5,0
 	bne-	2f
 #endif /* CONFIG_PPC_47x */
-1:
-BEGIN_FTR_SECTION
-	lwarx	r7,0,r1
-END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
-	stwcx.	r0,0,r1			/* to clear the reservation */
-	ACCOUNT_CPU_USER_EXIT(r2, r5, r7)
 #ifdef CONFIG_PPC_BOOK3S_32
 	kuep_unlock r5, r7
 #endif
@@ -466,21 +366,36 @@ END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
 	lwz	r4,_LINK(r1)
 	lwz	r5,_CCR(r1)
 	mtlr	r4
-	mtcr	r5
 	lwz	r7,_NIP(r1)
-	lwz	r2,GPR2(r1)
-	lwz	r1,GPR1(r1)
+	lwz	r8,_MSR(r1)
+	cmpwi	r3,0
+	lwz	r3,GPR3(r1)
 syscall_exit_finish:
-#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS)
-	mtspr	SPRN_NRI, r0
-#endif
 	mtspr	SPRN_SRR0,r7
 	mtspr	SPRN_SRR1,r8
+
+	bne	3f
+	mtcr	r5
+
+1:	lwz	r2,GPR2(r1)
+	lwz	r1,GPR1(r1)
 	rfi
 #ifdef CONFIG_40x
 	b .	/* Prevent prefetch past rfi */
 #endif
-_ASM_NOKPROBE_SYMBOL(syscall_exit_finish)
+
+3:	mtcr	r5
+	lwz	r4,_CTR(r1)
+	lwz	r5,_XER(r1)
+	REST_NVGPRS(r1)
+	mtctr	r4
+	mtxer	r5
+	lwz	r0,GPR0(r1)
+	lwz	r3,GPR3(r1)
+	REST_8GPRS(4,r1)
+	lwz	r12,GPR12(r1)
+	b	1b
+
 #ifdef CONFIG_44x
 2:	li	r7,0
 	iccci	r0,r0
@@ -488,9 +403,6 @@ _ASM_NOKPROBE_SYMBOL(syscall_exit_finish)
 	stw	r7,icache_44x_need_flush@l(r4)
 	b	1b
 #endif /* CONFIG_44x */
-66:	li	r3,-ENOSYS
-	b	ret_from_syscall
-
 	.globl	ret_from_fork
 ret_from_fork:
 	REST_NVGPRS(r1)
@@ -509,82 +421,6 @@ ret_from_kernel_thread:
 	li	r3,0
 	b	ret_from_syscall

-/* Traced system call support */
-syscall_dotrace:
-	SAVE_NVGPRS(r1)
-	li	r0,0xc00
-	stw	r0,_TRAP(r1)
-	addi	r3,r1,STACK_FRAME_OVERHEAD
-	bl	do_syscall_trace_enter
-	/*
-	 * Restore argument registers possibly just changed.
-	 * We use the return value of do_syscall_trace_enter
-	 * for call number to look up in the table (r0).
-	 */
-	mr	r0,r3
-	lwz	r3,GPR3(r1)
-	lwz	r4,GPR4(r1)
-	lwz	r5,GPR5(r1)
-	lwz	r6,GPR6(r1)
-	lwz	r7,GPR7(r1)
-	lwz	r8,GPR8(r1)
-	REST_NVGPRS(r1)
-
-	cmplwi	r0,NR_syscalls
-	/* Return code is already in r3 thanks to do_syscall_trace_enter() */
-	bge-	ret_from_syscall
-	b	syscall_dotrace_cont
-
-syscall_exit_work:
-	andi.	r0,r9,_TIF_RESTOREALL
-	beq+	0f
-	REST_NVGPRS(r1)
-	b	2f
-0:	cmplw	0,r3,r8
-	blt+	1f
-	andi.	r0,r9,_TIF_NOERROR
-	bne-	1f
-	lwz	r11,_CCR(r1)	/* Load CR */
-	neg	r3,r3
-	oris	r11,r11,0x1000	/* Set SO bit in CR */
-	stw	r11,_CCR(r1)
-
-1:	stw	r6,RESULT(r1)	/* Save result */
-	stw	r3,GPR3(r1)	/* Update return value */
-2:	andi.	r0,r9,(_TIF_PERSYSCALL_MASK)
-	beq	4f
-
-	/* Clear per-syscall TIF flags if any are set.  */
-
-	li	r11,_TIF_PERSYSCALL_MASK
-	addi	r12,r2,TI_FLAGS
-3:	lwarx	r8,0,r12
-	andc	r8,r8,r11
-	stwcx.	r8,0,r12
-	bne-	3b
-
-4:	/* Anything which requires enabling interrupts? */
-	andi.	r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP)
-	beq	ret_from_except
-
-	/* Re-enable interrupts. There is no need to trace that with
-	 * lockdep as we are supposed to have IRQs on at this point
-	 */
-	ori	r10,r10,MSR_EE
-	mtmsr	r10
-
-	/* Save NVGPRS if they're not saved already */
-	lwz	r4,_TRAP(r1)
-	andi.	r4,r4,1
-	beq	5f
-	SAVE_NVGPRS(r1)
-	li	r4,0xc00
-	stw	r4,_TRAP(r1)
-5:
-	addi	r3,r1,STACK_FRAME_OVERHEAD
-	bl	do_syscall_trace_leave
-	b	ret_from_except_full
-
 /*
  * System call was called from kernel. We get here with SRR1 in r9.
  * Mark the exception as recoverable once we have retrieved SRR0,
--
cgit
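
For reference, the C entry/exit pair the new assembly calls has roughly the following shape. The prototypes follow the register convention spelled out in the "/* Calling convention has r9 = orig r0, r10 = regs */" comment above, but they are a hedged sketch rather than the kernel's exact declarations:

    /*
     * Sketch of the C pair called from transfer_to_syscall /
     * ret_from_syscall. Syscall arguments arrive in r3-r8, the original
     * syscall number (r0) is passed as the 7th argument (r9) and the
     * pt_regs pointer as the 8th (r10). Illustrative declarations.
     */
    struct pt_regs;

    /* Dispatches the system call; returns the syscall's return value. */
    long system_call_exception(long r3, long r4, long r5,
                               long r6, long r7, long r8,
                               unsigned long r0, struct pt_regs *regs);

    /*
     * Runs exit work (signals, tracing, debug state) and returns a flag
     * word; non-zero tells the asm to take the slow path that also
     * restores the non-volatile GPRs, CTR and XER — the "cmpwi r3,0 /
     * bne 3f" sequence above. The third argument is 0 for the 'sc'
     * instruction (it distinguishes 'scv' on PPC64).
     */
    unsigned long syscall_exit_prepare(unsigned long r3,
                                       struct pt_regs *regs, long scv);
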
From 4d67facbcbdb3d9e3c9cb82e4ec47fc63d298dd8 Mon Sep 17 00:00:00 2001
From: Christophe Leroy
Date: Mon, 8 Feb 2021 15:10:34 +0000
Subject: powerpc/32: Remove verification of MSR_PR on syscall in the ASM entry

system_call_exception() checks MSR_PR and BUGs if a syscall is issued
from kernel mode. No need to handle it anymore from the ASM entry code.

null_syscall reduction 2 cycles (348 => 346 cycles)

Signed-off-by: Christophe Leroy
Signed-off-by: Michael Ellerman
Link: https://lore.kernel.org/r/1eddb42cb12092b1e3d72608d182c365db3da41d.1612796617.git.christophe.leroy@csgroup.eu
---
 arch/powerpc/kernel/entry_32.S | 30 ------------------------------
 1 file changed, 30 deletions(-)

(limited to 'arch/powerpc/kernel/entry_32.S')

diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
index bbf7ecea6fe0..cffe58e63356 100644
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
@@ -421,36 +421,6 @@ ret_from_kernel_thread:
 	li	r3,0
 	b	ret_from_syscall

-/*
- * System call was called from kernel. We get here with SRR1 in r9.
- * Mark the exception as recoverable once we have retrieved SRR0,
- * trap a warning and return ENOSYS with CR[SO] set.
- */
-	.globl	ret_from_kernel_syscall
-ret_from_kernel_syscall:
-	mfspr	r9, SPRN_SRR0
-	mfspr	r10, SPRN_SRR1
-#if !defined(CONFIG_4xx) && !defined(CONFIG_BOOKE)
-	LOAD_REG_IMMEDIATE(r11, MSR_KERNEL & ~(MSR_IR|MSR_DR))
-	mtmsr	r11
-#endif
-
-0:	trap
-	EMIT_BUG_ENTRY 0b,__FILE__,__LINE__, BUGFLAG_WARNING
-
-	li	r3, ENOSYS
-	crset	so
-#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS)
-	mtspr	SPRN_NRI, r0
-#endif
-	mtspr	SPRN_SRR0, r9
-	mtspr	SPRN_SRR1, r10
-	rfi
-#ifdef CONFIG_40x
-	b .	/* Prevent prefetch past rfi */
-#endif
-_ASM_NOKPROBE_SYMBOL(ret_from_kernel_syscall)
-
 /*
  * Top-level page fault handling.
  * This is in assembler because if do_page_fault tells us that
--
cgit
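
The check that replaces this assembly lives in system_call_exception(). A minimal sketch of the idea, with a stand-in for the kernel's BUG machinery (MSR_PR is the "problem state" bit, set when the trapped context was user mode):

    /*
     * Sketch: the C entry point rejects kernel-mode "syscalls" itself,
     * so the ASM fallback path above can go. Illustrative stand-ins.
     */
    #define MSR_PR 0x4000UL

    struct pt_regs { unsigned long msr; };

    static void check_syscall_from_user(const struct pt_regs *regs)
    {
        if (!(regs->msr & MSR_PR))  /* syscall issued from kernel mode */
            __builtin_trap();       /* stands in for BUG() */
    }
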
From eb595eca74067b78d36fb188b555e30f28686fc7 Mon Sep 17 00:00:00 2001
From: Christophe Leroy
Date: Mon, 8 Feb 2021 15:10:41 +0000
Subject: powerpc/32: Remove the counter in global_dbcr0

global_dbcr0 has two parts: 4 bytes to save/restore the value of
SPRN_DBCR0, and 4 bytes that are incremented/decremented every time
something saves/loads the above value.

This counter is only ever incremented or decremented; its value is
never read.

Remove the counter and divide the size of global_dbcr0 by 2.

Signed-off-by: Christophe Leroy
Signed-off-by: Michael Ellerman
Link: https://lore.kernel.org/r/7e381dc58b3f583556cfab37ba5d813bfd5cce1e.1612796617.git.christophe.leroy@csgroup.eu
---
 arch/powerpc/kernel/entry_32.S | 12 +++---------
 1 file changed, 3 insertions(+), 9 deletions(-)

(limited to 'arch/powerpc/kernel/entry_32.S')

diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
index cffe58e63356..9dd90be9f8a5 100644
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
@@ -175,14 +175,11 @@ transfer_to_handler:
 	addi	r11,r11,global_dbcr0@l
 #ifdef CONFIG_SMP
 	lwz	r9,TASK_CPU(r2)
-	slwi	r9,r9,3
+	slwi	r9,r9,2
 	add	r11,r11,r9
 #endif
 	lwz	r12,0(r11)
 	mtspr	SPRN_DBCR0,r12
-	lwz	r12,4(r11)
-	addi	r12,r12,-1
-	stw	r12,4(r11)
 #endif
 	b	3f

@@ -981,14 +978,11 @@ load_dbcr0:
 	addi	r11,r11,global_dbcr0@l
#ifdef CONFIG_SMP
 	lwz	r9,TASK_CPU(r2)
-	slwi	r9,r9,3
+	slwi	r9,r9,2
 	add	r11,r11,r9
 #endif
 	stw	r10,0(r11)
 	mtspr	SPRN_DBCR0,r0
-	lwz	r10,4(r11)
-	addi	r10,r10,1
-	stw	r10,4(r11)
 	li	r11,-1
 	mtspr	SPRN_DBSR,r11	/* clear all pending debug events */
 	blr
@@ -997,7 +991,7 @@ load_dbcr0:

 	.align	4
 	.global global_dbcr0
global_dbcr0:
-	.space	8*NR_CPUS
+	.space	4*NR_CPUS
 	.previous
 #endif /* !(CONFIG_4xx || CONFIG_BOOKE) */
--
cgit
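
The shift change (slwi r9,r9,3 => slwi r9,r9,2) follows directly from the per-CPU slot shrinking from 8 bytes to 4. In C terms, a sketch of the addressing (illustrative only):

    /*
     * Sketch of the layout change: one 4-byte DBCR0 slot per CPU
     * instead of an 8-byte {DBCR0, counter} pair, so the CPU index is
     * scaled by 4 instead of 8 when locating a CPU's slot.
     */
    #define NR_CPUS 4

    static unsigned long global_dbcr0[NR_CPUS];  /* was: 2 words per CPU */

    static unsigned long *dbcr0_slot(unsigned int cpu)
    {
        return &global_dbcr0[cpu];  /* base + cpu * 4 on 32-bit */
    }
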
From d524dda719f06967db4d3ba519edf9267f84c155 Mon Sep 17 00:00:00 2001
From: Christophe Leroy
Date: Tue, 9 Feb 2021 19:29:28 +0000
Subject: powerpc/32: Handle bookE debugging in C in syscall entry/exit

The handling of SPRN_DBCR0 and other registers can easily be done in C
instead of ASM. For that, create booke_load_dbcr0() and
booke_restore_dbcr0().

Signed-off-by: Christophe Leroy
Signed-off-by: Michael Ellerman
Link: https://lore.kernel.org/r/1a7515f9258b27a9177de88491a8bb79b255ceb7.1612898425.git.christophe.leroy@csgroup.eu
---
 arch/powerpc/kernel/entry_32.S | 7 -------
 1 file changed, 7 deletions(-)

(limited to 'arch/powerpc/kernel/entry_32.S')

diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
index 9dd90be9f8a5..78c430b7f9d9 100644
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
@@ -343,13 +343,6 @@ ret_from_syscall:
 	addi	r4,r1,STACK_FRAME_OVERHEAD
 	li	r5,0
 	bl	syscall_exit_prepare
-#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
-	/* If the process has its own DBCR0 value, load it up.  The internal
-	   debug mode bit tells us that dbcr0 should be loaded. */
-	lwz	r0,THREAD+THREAD_DBCR0(r2)
-	andis.	r10,r0,DBCR0_IDM@h
-	bnel-	load_dbcr0
-#endif
 #ifdef CONFIG_PPC_47x
 	lis	r4,icache_44x_need_flush@ha
 	lwz	r5,icache_44x_need_flush@l(r4)
--
cgit
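
A hedged sketch of what a C replacement such as booke_load_dbcr0() plausibly does, reconstructed from the removed assembly (load the thread's DBCR0 when the internal-debug-mode bit is set). The thread layout here is a stand-in and the SPR writes are left as comments:

    /*
     * Sketch only: mirrors "lwz THREAD_DBCR0 / andis. DBCR0_IDM@h /
     * bnel- load_dbcr0" above. See the kernel's actual
     * booke_load_dbcr0() for the real implementation.
     */
    #define DBCR0_IDM 0x40000000UL

    struct thread_debug { unsigned long dbcr0; };

    static inline void booke_load_dbcr0(const struct thread_debug *dbg)
    {
        unsigned long dbcr0 = dbg->dbcr0;

        if (!(dbcr0 & DBCR0_IDM))
            return;         /* process doesn't use internal debug mode */

        /* in the real kernel: mtspr(SPRN_DBCR0, dbcr0) and a write of
         * -1 to SPRN_DBSR to clear all pending debug events */
    }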