author:    Guo Ren <guoren@linux.alibaba.com>  2020-12-24 02:02:55 +0000
committer: Guo Ren <guoren@linux.alibaba.com>  2021-01-12 09:52:40 +0800
commit:    b0ae5e26b863f74aeaf73684d04dfb6fb72f836c
tree:      822975c71512edf8a85575b8f08fd745630d5e73  /arch/csky/mm/fault.c
parent:    7b513cf2bfdcdb7ba3f2b6e83f0e17e0793825d7
csky: Remove prologue of page fault handler in entry.S
There is a prologue in the page fault handler (entry.S) that marks pages
dirty and/or accessed in the page attributes, but all of this is already
handled in handle_pte_fault. Remove it, and:
- Add flush_tlb_one in the vmalloc page fault path instead of the prologue
  (see the TLB sketch after the diff).
- Use the cmpxchg_fixup C code in do_page_fault instead of the ASM one
  (see the sketch below).
Signed-off-by: Guo Ren <guoren@linux.alibaba.com>
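
[Editor's note] For readers unfamiliar with the trap-based cmpxchg emulation,
here is a minimal userspace sketch of the retry semantics the fixup preserves.
It is illustrative only: emulated_cmpxchg and traps_once are invented names.
In the real kernel, the ldw/stw pair lives in arch/csky assembly at the
csky_cmpxchg_ldw/csky_cmpxchg_stw labels, and csky_cmpxchg_fixup() rewinds the
faulting PC from the stw back to the ldw so the whole read-modify-write
restarts instead of storing a value compared against a possibly stale load.

    #include <stdio.h>

    static int traps_once = 1;  /* pretend the store faults exactly once */

    static int emulated_cmpxchg(int *ptr, int expected, int newval)
    {
    retry:  /* plays the role of the csky_cmpxchg_ldw label */
            {
                    int old = *ptr;

                    if (old != expected)
                            return old;

                    /* Plays the role of the csky_cmpxchg_stw label: if the
                     * store traps with VEC_TLBMODIFIED, the fixup resumes at
                     * the load above rather than here, so the comparison is
                     * redone against fresh memory. */
                    if (traps_once) {
                            traps_once = 0;
                            goto retry;
                    }
                    *ptr = newval;
                    return old;
            }
    }

    int main(void)
    {
            int v = 1;
            int old = emulated_cmpxchg(&v, 1, 2);

            printf("old = %d, v = %d\n", old, v); /* old = 1, v = 2 */
            return 0;
    }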
Diffstat (limited to 'arch/csky/mm/fault.c')
-rw-r--r--   arch/csky/mm/fault.c   45
1 file changed, 40 insertions(+), 5 deletions(-)
diff --git a/arch/csky/mm/fault.c b/arch/csky/mm/fault.c
index 94eac13b9c97..e888acf1c403 100644
--- a/arch/csky/mm/fault.c
+++ b/arch/csky/mm/fault.c
@@ -39,20 +39,52 @@ int fixup_exception(struct pt_regs *regs)
 	return 0;
 }
 
+static inline bool is_write(struct pt_regs *regs)
+{
+	switch (trap_no(regs)) {
+	case VEC_TLBINVALIDS:
+		return true;
+	case VEC_TLBMODIFIED:
+		return true;
+	}
+
+	return false;
+}
+
+#ifdef CONFIG_CPU_HAS_LDSTEX
+static inline void csky_cmpxchg_fixup(struct pt_regs *regs)
+{
+	return;
+}
+#else
+extern unsigned long csky_cmpxchg_ldw;
+extern unsigned long csky_cmpxchg_stw;
+static inline void csky_cmpxchg_fixup(struct pt_regs *regs)
+{
+	if (trap_no(regs) != VEC_TLBMODIFIED)
+		return;
+
+	if (instruction_pointer(regs) == csky_cmpxchg_stw)
+		instruction_pointer_set(regs, csky_cmpxchg_ldw);
+	return;
+}
+#endif
+
 /*
  * This routine handles page faults. It determines the address,
  * and the problem, and then passes it off to one of the appropriate
  * routines.
  */
-asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long write,
-			      unsigned long mmu_meh)
+asmlinkage void do_page_fault(struct pt_regs *regs)
 {
 	struct vm_area_struct *vma = NULL;
 	struct task_struct *tsk = current;
 	struct mm_struct *mm = tsk->mm;
 	int si_code;
 	int fault;
-	unsigned long address = mmu_meh & PAGE_MASK;
+	unsigned long address = read_mmu_entryhi() & PAGE_MASK;
+
+	csky_cmpxchg_fixup(regs);
 
 	if (kprobe_page_fault(regs, tsk->thread.trap_no))
 		return;
@@ -104,6 +136,9 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long write,
 		pte_k = pte_offset_kernel(pmd_k, address);
 		if (!pte_present(*pte_k))
 			goto no_context;
+
+		flush_tlb_one(address);
+
 		return;
 	}
 
@@ -132,7 +167,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long write,
 good_area:
 	si_code = SEGV_ACCERR;
 
-	if (write) {
+	if (is_write(regs)) {
 		if (!(vma->vm_flags & VM_WRITE))
 			goto bad_area;
 	} else {
@@ -145,7 +180,7 @@ good_area:
 	 * make sure we exit gracefully rather than endlessly redo
 	 * the fault.
 	 */
-	fault = handle_mm_fault(vma, address, write ? FAULT_FLAG_WRITE : 0,
+	fault = handle_mm_fault(vma, address, is_write(regs) ? FAULT_FLAG_WRITE : 0,
 			regs);
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
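
[Editor's note] Why the added flush_tlb_one(address) matters: the handler
repairs a vmalloc-area fault by syncing the kernel's page-table entry into
the faulting task's tables, but a TLB entry cached from the failed lookup can
survive that sync, so it must be evicted or the same fault recurs. Below is a
hedged userspace sketch of that invariant; the tlb_page/tlb_valid/page_table
model and tlb_lookup() are invented for illustration and are not kernel APIs.

    #include <stdbool.h>
    #include <stdio.h>

    #define TLB_ENTRIES 4
    #define NO_PAGE (-1)

    static int  tlb_page[TLB_ENTRIES];   /* page number cached per entry */
    static bool tlb_valid[TLB_ENTRIES];  /* cached "present" bit */
    static bool page_table[8];           /* authoritative mapping state */

    /* Evict any cached translation for one page, as the patch does after
     * syncing the vmalloc entry into the task's page table. */
    static void flush_tlb_one(int page)
    {
            for (int i = 0; i < TLB_ENTRIES; i++)
                    if (tlb_page[i] == page)
                            tlb_page[i] = NO_PAGE;
    }

    /* A lookup consults the TLB first; only a miss re-reads the page table. */
    static bool tlb_lookup(int page)
    {
            for (int i = 0; i < TLB_ENTRIES; i++)
                    if (tlb_page[i] == page)
                            return tlb_valid[i];
            tlb_page[0] = page;              /* naive refill into slot 0 */
            tlb_valid[0] = page_table[page];
            return page_table[page];
    }

    int main(void)
    {
            for (int i = 0; i < TLB_ENTRIES; i++)
                    tlb_page[i] = NO_PAGE;

            tlb_lookup(3);          /* faults; TLB caches "not present"      */
            page_table[3] = true;   /* handler syncs the page table...       */
            printf("without flush: %d\n", tlb_lookup(3)); /* 0: stale entry  */
            flush_tlb_one(3);       /* ...and must also evict the TLB entry  */
            printf("with flush:    %d\n", tlb_lookup(3)); /* 1: resolved     */
            return 0;
    }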