Diffstat (limited to 'arch/arc/mm/fault.c')
-rw-r--r--  arch/arc/mm/fault.c | 64 ++++++++++++++++++++++++++++------------------------------------
 1 file changed, 28 insertions(+), 36 deletions(-)
diff --git a/arch/arc/mm/fault.c b/arch/arc/mm/fault.c
index 72f5405a7ec5..95119a5e7761 100644
--- a/arch/arc/mm/fault.c
+++ b/arch/arc/mm/fault.c
@@ -13,7 +13,7 @@
 #include <linux/kdebug.h>
 #include <linux/perf_event.h>
 #include <linux/mm_types.h>
-#include <asm/pgalloc.h>
+#include <asm/entry.h>
 #include <asm/mmu.h>
 
 /*
@@ -34,28 +34,34 @@ noinline static int handle_kernel_vaddr_fault(unsigned long address)
 	pud_t *pud, *pud_k;
 	pmd_t *pmd, *pmd_k;
 
-	pgd = pgd_offset_fast(current->active_mm, address);
+	pgd = pgd_offset(current->active_mm, address);
 	pgd_k = pgd_offset_k(address);
 
-	if (!pgd_present(*pgd_k))
+	if (pgd_none(*pgd_k))
 		goto bad_area;
+	if (!pgd_present(*pgd))
+		set_pgd(pgd, *pgd_k);
 
 	p4d = p4d_offset(pgd, address);
 	p4d_k = p4d_offset(pgd_k, address);
-	if (!p4d_present(*p4d_k))
+	if (p4d_none(*p4d_k))
 		goto bad_area;
+	if (!p4d_present(*p4d))
+		set_p4d(p4d, *p4d_k);
 
 	pud = pud_offset(p4d, address);
 	pud_k = pud_offset(p4d_k, address);
-	if (!pud_present(*pud_k))
+	if (pud_none(*pud_k))
 		goto bad_area;
+	if (!pud_present(*pud))
+		set_pud(pud, *pud_k);
 
 	pmd = pmd_offset(pud, address);
 	pmd_k = pmd_offset(pud_k, address);
-	if (!pmd_present(*pmd_k))
+	if (pmd_none(*pmd_k))
 		goto bad_area;
-
-	set_pmd(pmd, *pmd_k);
+	if (!pmd_present(*pmd))
+		set_pmd(pmd, *pmd_k);
 
 	/* XXX: create the TLB entry here */
 	return 0;
@@ -94,10 +100,10 @@ void do_page_fault(unsigned long address, struct pt_regs *regs)
 	if (faulthandler_disabled() || !mm)
 		goto no_context;
 
-	if (regs->ecr_cause & ECR_C_PROTV_STORE)	/* ST/EX */
+	if (regs->ecr.cause & ECR_C_PROTV_STORE)	/* ST/EX */
 		write = 1;
-	else if ((regs->ecr_vec == ECR_V_PROTV) &&
-		 (regs->ecr_cause == ECR_C_PROTV_INST_FETCH))
+	else if ((regs->ecr.vec == ECR_V_PROTV) &&
+		 (regs->ecr.cause == ECR_C_PROTV_INST_FETCH))
 		exec = 1;
 
 	flags = FAULT_FLAG_DEFAULT;
@@ -106,16 +112,11 @@ void do_page_fault(unsigned long address, struct pt_regs *regs)
 	if (write)
 		flags |= FAULT_FLAG_WRITE;
 
+	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 retry:
-	mmap_read_lock(mm);
-
-	vma = find_vma(mm, address);
+	vma = lock_mm_and_find_vma(mm, address, regs);
 	if (!vma)
-		goto bad_area;
-	if (unlikely(address < vma->vm_start)) {
-		if (!(vma->vm_flags & VM_GROWSDOWN) || expand_stack(vma, address))
-			goto bad_area;
-	}
+		goto bad_area_nosemaphore;
 
 	/*
 	 * vm_area is good, now check permissions for this memory access
@@ -131,7 +132,7 @@ retry:
 		goto bad_area;
 	}
 
-	fault = handle_mm_fault(vma, address, flags);
+	fault = handle_mm_fault(vma, address, flags, regs);
 
 	/* Quick path to respond to signals */
 	if (fault_signal_pending(fault, regs)) {
@@ -140,11 +141,14 @@ retry:
 		return;
 	}
 
+	/* The fault is fully completed (including releasing mmap lock) */
+	if (fault & VM_FAULT_COMPLETED)
+		return;
+
 	/*
 	 * Fault retry nuances, mmap_lock already relinquished by core mm
 	 */
-	if (unlikely((fault & VM_FAULT_RETRY) &&
-		     (flags & FAULT_FLAG_ALLOW_RETRY))) {
+	if (unlikely(fault & VM_FAULT_RETRY)) {
 		flags |= FAULT_FLAG_TRIED;
 		goto retry;
 	}
@@ -152,26 +156,14 @@ retry:
 
 bad_area:
 	mmap_read_unlock(mm);
-
+bad_area_nosemaphore:
 	/*
 	 * Major/minor page fault accounting
 	 * (in case of retry we only land here once)
 	 */
-	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
-
-	if (likely(!(fault & VM_FAULT_ERROR))) {
-		if (fault & VM_FAULT_MAJOR) {
-			tsk->maj_flt++;
-			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
-				      regs, address);
-		} else {
-			tsk->min_flt++;
-			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
-				      regs, address);
-		}
-
+	if (likely(!(fault & VM_FAULT_ERROR)))
+		/* Normal return path: fault Handled Gracefully */
 		return;
-	}
 
 	if (!user_mode(regs))
 		goto no_context;
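The handle_kernel_vaddr_fault() hunk above follows a common pattern: a fault in the shared kernel region is resolved by lazily copying the missing top-level entry from the master kernel page tables into the faulting context's tables, with a two-step check at each level (entry absent in the master means a genuine bad fault; present in the master but absent locally means a stale copy to sync). Below is a minimal user-space C sketch of that pattern, for illustration only; every name in it (struct dir, master_dir, sync_kernel_entry) is hypothetical and none of it is kernel API.

	/*
	 * Toy model of the lazy master-table sync done in
	 * handle_kernel_vaddr_fault(). All names are hypothetical.
	 */
	#include <stdio.h>
	#include <stdbool.h>

	#define NENTRIES 4

	struct dir { void *entry[NENTRIES]; };

	static struct dir master_dir;		/* plays the role of init_mm's pgd */

	/* Returns false if the master has no mapping either (a genuine fault). */
	static bool sync_kernel_entry(struct dir *local, unsigned idx)
	{
		if (master_dir.entry[idx] == NULL)	/* like pgd_none(*pgd_k) */
			return false;
		if (local->entry[idx] == NULL)		/* like !pgd_present(*pgd) */
			local->entry[idx] = master_dir.entry[idx];
		return true;
	}

	int main(void)
	{
		static int payload = 42;
		struct dir task_dir = { 0 };	/* task's view, initially unsynced */

		master_dir.entry[1] = &payload;	/* mapping exists only in master */

		/* Index 1 is copied from the master; index 2 is a bad area. */
		printf("idx 1 synced: %s\n", sync_kernel_entry(&task_dir, 1) ? "yes" : "no");
		printf("idx 2 synced: %s\n", sync_kernel_entry(&task_dir, 2) ? "yes" : "no");
		return 0;
	}

The real code repeats the same two checks at each page-table level (pgd, p4d, pud, pmd) so that only the levels actually missing from the task's tables are filled in from the kernel master copy.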
