Diffstat (limited to 'arch/sparc/mm/fault_64.c')
-rw-r--r-- | arch/sparc/mm/fault_64.c | 74
1 file changed, 40 insertions(+), 34 deletions(-)
diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
index 2371fb6b97e4..e326caf708c6 100644
--- a/arch/sparc/mm/fault_64.c
+++ b/arch/sparc/mm/fault_64.c
@@ -27,7 +27,6 @@
 #include <linux/uaccess.h>
 
 #include <asm/page.h>
-#include <asm/pgtable.h>
 #include <asm/openprom.h>
 #include <asm/oplib.h>
 #include <asm/asi.h>
@@ -71,7 +70,7 @@ static void __kprobes bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr)
 }
 
 /*
- * We now make sure that mmap_sem is held in all paths that call
+ * We now make sure that mmap_lock is held in all paths that call
  * this. Additionally, to prevent kswapd from ripping ptes from
  * under us, raise interrupts around the time that we look at the
  * pte, kswapd will have to wait to get his smp ipi response from
@@ -80,6 +79,7 @@ static void __kprobes bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr)
 static unsigned int get_user_insn(unsigned long tpc)
 {
 	pgd_t *pgdp = pgd_offset(current->mm, tpc);
+	p4d_t *p4dp;
 	pud_t *pudp;
 	pmd_t *pmdp;
 	pte_t *ptep, pte;
@@ -88,7 +88,10 @@ static unsigned int get_user_insn(unsigned long tpc)
 	if (pgd_none(*pgdp) || unlikely(pgd_bad(*pgdp)))
 		goto out;
 
-	pudp = pud_offset(pgdp, tpc);
+	p4dp = p4d_offset(pgdp, tpc);
+	if (p4d_none(*p4dp) || unlikely(p4d_bad(*p4dp)))
+		goto out;
+	pudp = pud_offset(p4dp, tpc);
 	if (pud_none(*pudp) || unlikely(pud_bad(*pudp)))
 		goto out;
 
@@ -96,6 +99,7 @@ static unsigned int get_user_insn(unsigned long tpc)
 	local_irq_disable();
 
 	pmdp = pmd_offset(pudp, tpc);
+again:
 	if (pmd_none(*pmdp) || unlikely(pmd_bad(*pmdp)))
 		goto out_irq_enable;
 
@@ -112,6 +116,8 @@ static unsigned int get_user_insn(unsigned long tpc)
 #endif
 	{
 		ptep = pte_offset_map(pmdp, tpc);
+		if (!ptep)
+			goto again;
 		pte = *ptep;
 		if (pte_present(pte)) {
 			pa = (pte_pfn(pte) << PAGE_SHIFT);
@@ -173,7 +179,7 @@ static void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
 	if (unlikely(show_unhandled_signals))
 		show_signal_msg(regs, sig, code, addr, current);
 
-	force_sig_fault(sig, code, (void __user *) addr, 0);
+	force_sig_fault(sig, code, (void __user *) addr);
 }
 
 static unsigned int get_fault_insn(struct pt_regs *regs, unsigned int insn)
@@ -267,7 +273,7 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
 	int si_code, fault_code;
 	vm_fault_t fault;
 	unsigned long address, mm_rss;
-	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
+	unsigned int flags = FAULT_FLAG_DEFAULT;
 
 	fault_code = get_thread_fault_code();
 
@@ -315,7 +321,7 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
 
 	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 
-	if (!down_read_trylock(&mm->mmap_sem)) {
+	if (!mmap_read_trylock(mm)) {
 		if ((regs->tstate & TSTATE_PRIV) &&
 		    !search_exception_tables(regs->tpc)) {
 			insn = get_fault_insn(regs, insn);
@@ -323,7 +329,7 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
 		}
 
 retry:
-		down_read(&mm->mmap_sem);
+		mmap_read_lock(mm);
 	}
 
 	if (fault_code & FAULT_CODE_BAD_RA)
@@ -380,8 +386,9 @@ continue_fault:
 			goto bad_area;
 		}
 	}
-	if (expand_stack(vma, address))
-		goto bad_area;
+	vma = expand_stack(mm, address);
+	if (!vma)
+		goto bad_area_nosemaphore;
 	/*
 	 * Ok, we have a good vm_area for this memory access, so
 	 * we can handle it..
@@ -419,10 +426,19 @@ good_area:
 			goto bad_area;
 	}
 
-	fault = handle_mm_fault(vma, address, flags);
+	fault = handle_mm_fault(vma, address, flags, regs);
 
-	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
+	if (fault_signal_pending(fault, regs)) {
+		if (regs->tstate & TSTATE_PRIV) {
+			insn = get_fault_insn(regs, insn);
+			goto handle_kernel_fault;
+		}
 		goto exit_exception;
+	}
+
+	/* The fault is fully completed (including releasing mmap lock) */
+	if (fault & VM_FAULT_COMPLETED)
+		goto lock_released;
 
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
@@ -434,30 +450,19 @@ good_area:
 			BUG();
 	}
 
-	if (flags & FAULT_FLAG_ALLOW_RETRY) {
-		if (fault & VM_FAULT_MAJOR) {
-			current->maj_flt++;
-			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ,
-				      1, regs, address);
-		} else {
-			current->min_flt++;
-			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN,
-				      1, regs, address);
-		}
-		if (fault & VM_FAULT_RETRY) {
-			flags &= ~FAULT_FLAG_ALLOW_RETRY;
-			flags |= FAULT_FLAG_TRIED;
+	if (fault & VM_FAULT_RETRY) {
+		flags |= FAULT_FLAG_TRIED;
 
-			/* No need to up_read(&mm->mmap_sem) as we would
-			 * have already released it in __lock_page_or_retry
-			 * in mm/filemap.c.
-			 */
+		/* No need to mmap_read_unlock(mm) as we would
+		 * have already released it in __lock_page_or_retry
+		 * in mm/filemap.c.
+		 */
 
-			goto retry;
-		}
+		goto retry;
 	}
 
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
+lock_released:
 	mm_rss = get_mm_rss(mm);
 #if defined(CONFIG_TRANSPARENT_HUGEPAGE)
 	mm_rss -= (mm->context.thp_pte_count * (HPAGE_SIZE / PAGE_SIZE));
@@ -486,8 +491,9 @@ exit_exception:
 	 * Fix it, but check if it's kernel or user first..
 	 */
 bad_area:
+	mmap_read_unlock(mm);
+bad_area_nosemaphore:
 	insn = get_fault_insn(regs, insn);
-	up_read(&mm->mmap_sem);
 
 handle_kernel_fault:
 	do_kernel_fault(regs, si_code, fault_code, insn, address);
@@ -499,7 +505,7 @@ handle_kernel_fault:
 	 */
 out_of_memory:
 	insn = get_fault_insn(regs, insn);
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 	if (!(regs->tstate & TSTATE_PRIV)) {
 		pagefault_out_of_memory();
 		goto exit_exception;
@@ -512,7 +518,7 @@ intr_or_no_mm:
 
 do_sigbus:
 	insn = get_fault_insn(regs, insn);
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 
 	/*
 	 * Send a sigbus, regardless of whether we were in kernel
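Two of the changes above deserve a short illustration.

The get_user_insn() hunks add the p4d step so the function spells out the kernel's full five-level page-table walk (pgd -> p4d -> pud -> pmd -> pte); sparc64 folds the p4d level away, so p4d_offset() costs nothing there, but every walker must still name the level. The standalone toy program below mirrors only the shape of that descent; every type, table and function in it is invented for illustration and none of it is real kernel API:

/* Toy model of the five-level descent added to get_user_insn() above.
 * All names here (struct table, walk_level, get_user_insn_toy) are
 * hypothetical; real pgd/p4d/pud/pmd/pte entries are hardware-defined.
 */
#include <stdio.h>

#define LEVEL_BITS 2                    /* 4 slots per toy table */
#define LEVEL_SIZE (1 << LEVEL_BITS)

struct table { void *slot[LEVEL_SIZE]; };

/* Descend one level; a NULL slot plays the role of p4d_none()/pud_none()/... */
static void *walk_level(struct table *t, unsigned long va, int shift)
{
	return t ? t->slot[(va >> shift) & (LEVEL_SIZE - 1)] : NULL;
}

static unsigned int get_user_insn_toy(struct table *pgd, unsigned long tpc)
{
	struct table *p4d = walk_level(pgd, tpc, 8);  /* the new p4d step */
	struct table *pud = walk_level(p4d, tpc, 6);
	struct table *pmd = walk_level(pud, tpc, 4);
	unsigned int *pte = walk_level(pmd, tpc, 2);

	return pte ? *pte : 0;          /* 0 plays the role of "no insn" */
}

int main(void)
{
	unsigned int insn = 0x01000000;               /* a sparc64 nop */
	struct table pgd = {0}, p4d = {0}, pud = {0}, pmd = {0};

	/* Wire up a single translation for virtual address 0. */
	pmd.slot[0] = &insn;
	pud.slot[0] = &pmd;
	p4d.slot[0] = &pud;
	pgd.slot[0] = &p4d;

	printf("insn at 0x000: %08x\n", get_user_insn_toy(&pgd, 0x000));
	printf("insn at 0x100: %08x\n", get_user_insn_toy(&pgd, 0x100));
	return 0;
}

The good_area hunks change who owns mmap_lock when handle_mm_fault() returns: on VM_FAULT_RETRY the handler has already dropped the lock before asking the caller to retry with FAULT_FLAG_TRIED, and on VM_FAULT_COMPLETED the fault finished and released the lock itself, which is why the unlock is now jumped over via the new lock_released label. The sketch below models that ownership protocol with a toy lock; again, all names and flag values are made up for illustration:

/* Toy model of the retry/completed protocol in the good_area hunks above.
 * The flag values and fake_handle_mm_fault() are hypothetical; only the
 * lock-ownership rules mirror the real code.
 */
#include <stdio.h>

#define FAULT_FLAG_TRIED   0x1
#define VM_FAULT_RETRY     0x1
#define VM_FAULT_COMPLETED 0x2

static int lock_held;

static void mmap_read_lock_toy(void)   { lock_held = 1; }
static void mmap_read_unlock_toy(void) { lock_held = 0; }

static unsigned int fake_handle_mm_fault(unsigned int flags)
{
	if (!(flags & FAULT_FLAG_TRIED)) {
		mmap_read_unlock_toy(); /* handler drops the lock itself */
		return VM_FAULT_RETRY;  /* ...then asks the caller to retry */
	}
	mmap_read_unlock_toy();         /* fully completed, lock released */
	return VM_FAULT_COMPLETED;
}

int main(void)
{
	unsigned int flags = 0, fault;

retry:
	mmap_read_lock_toy();
	fault = fake_handle_mm_fault(flags);

	if (fault & VM_FAULT_COMPLETED)
		goto lock_released;     /* must NOT unlock a second time */

	if (fault & VM_FAULT_RETRY) {
		flags |= FAULT_FLAG_TRIED;
		goto retry;             /* lock already dropped by handler */
	}

	mmap_read_unlock_toy();
lock_released:
	printf("done, lock_held=%d\n", lock_held);
	return 0;
}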