Diffstat (limited to 'arch/xtensa/mm/fault.c')
-rw-r--r--	arch/xtensa/mm/fault.c	181
1 file changed, 91 insertions, 90 deletions
diff --git a/arch/xtensa/mm/fault.c b/arch/xtensa/mm/fault.c
index 2ab0e0dcd166..16e11b6f6f78 100644
--- a/arch/xtensa/mm/fault.c
+++ b/arch/xtensa/mm/fault.c
@@ -20,11 +20,63 @@
 #include <asm/mmu_context.h>
 #include <asm/cacheflush.h>
 #include <asm/hardirq.h>
-#include <asm/pgalloc.h>
+#include <asm/traps.h>
 
-DEFINE_PER_CPU(unsigned long, asid_cache) = ASID_USER_FIRST;
 void bad_page_fault(struct pt_regs*, unsigned long, int);
 
+static void vmalloc_fault(struct pt_regs *regs, unsigned int address)
+{
+#ifdef CONFIG_MMU
+	/* Synchronize this task's top level page-table
+	 * with the 'reference' page table.
+	 */
+	struct mm_struct *act_mm = current->active_mm;
+	int index = pgd_index(address);
+	pgd_t *pgd, *pgd_k;
+	p4d_t *p4d, *p4d_k;
+	pud_t *pud, *pud_k;
+	pmd_t *pmd, *pmd_k;
+	pte_t *pte_k;
+
+	if (act_mm == NULL)
+		goto bad_page_fault;
+
+	pgd = act_mm->pgd + index;
+	pgd_k = init_mm.pgd + index;
+
+	if (!pgd_present(*pgd_k))
+		goto bad_page_fault;
+
+	pgd_val(*pgd) = pgd_val(*pgd_k);
+
+	p4d = p4d_offset(pgd, address);
+	p4d_k = p4d_offset(pgd_k, address);
+	if (!p4d_present(*p4d) || !p4d_present(*p4d_k))
+		goto bad_page_fault;
+
+	pud = pud_offset(p4d, address);
+	pud_k = pud_offset(p4d_k, address);
+	if (!pud_present(*pud) || !pud_present(*pud_k))
+		goto bad_page_fault;
+
+	pmd = pmd_offset(pud, address);
+	pmd_k = pmd_offset(pud_k, address);
+	if (!pmd_present(*pmd) || !pmd_present(*pmd_k))
+		goto bad_page_fault;
+
+	pmd_val(*pmd) = pmd_val(*pmd_k);
+	pte_k = pte_offset_kernel(pmd_k, address);
+
+	if (!pte_present(*pte_k))
+		goto bad_page_fault;
+	return;
+
+bad_page_fault:
+	bad_page_fault(regs, address, SIGKILL);
+#else
+	WARN_ONCE(1, "%s in noMMU configuration\n", __func__);
+#endif
+}
 /*
  * This routine handles page faults.  It determines the address,
  * and the problem, and then passes it off to one of the appropriate
@@ -43,15 +95,17 @@ void do_page_fault(struct pt_regs *regs)
 	int is_write, is_exec;
 	vm_fault_t fault;
-	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
+	unsigned int flags = FAULT_FLAG_DEFAULT;
 
 	code = SEGV_MAPERR;
 
 	/* We fault-in kernel-space virtual memory on-demand. The
 	 * 'reference' page table is init_mm.pgd.
 	 */
-	if (address >= TASK_SIZE && !user_mode(regs))
-		goto vmalloc_fault;
+	if (address >= TASK_SIZE && !user_mode(regs)) {
+		vmalloc_fault(regs, address);
+		return;
+	}
 
 	/* If we're in an interrupt or have no user
 	 * context, we must not take the fault..
 	 */
@@ -73,24 +127,18 @@ void do_page_fault(struct pt_regs *regs)
 	if (user_mode(regs))
 		flags |= FAULT_FLAG_USER;
-retry:
-	down_read(&mm->mmap_sem);
-	vma = find_vma(mm, address);
 
+	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
+
+retry:
+	vma = lock_mm_and_find_vma(mm, address, regs);
 	if (!vma)
-		goto bad_area;
-	if (vma->vm_start <= address)
-		goto good_area;
-	if (!(vma->vm_flags & VM_GROWSDOWN))
-		goto bad_area;
-	if (expand_stack(vma, address))
-		goto bad_area;
+		goto bad_area_nosemaphore;
 
 	/* Ok, we have a good vm_area for this memory access, so
 	 * we can handle it..
 	 */
-good_area:
 	code = SEGV_ACCERR;
 
 	if (is_write) {
@@ -108,9 +156,16 @@ good_area:
 	 * make sure we exit gracefully rather than endlessly redo
 	 * the fault.
 	 */
-	fault = handle_mm_fault(vma, address, flags);
+	fault = handle_mm_fault(vma, address, flags, regs);
+
+	if (fault_signal_pending(fault, regs)) {
+		if (!user_mode(regs))
+			bad_page_fault(regs, address, SIGKILL);
+		return;
+	}
 
-	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
+	/* The fault is fully completed (including releasing mmap lock) */
+	if (fault & VM_FAULT_COMPLETED)
 		return;
 
 	if (unlikely(fault & VM_FAULT_ERROR)) {
@@ -122,42 +177,29 @@ good_area:
 			goto do_sigbus;
 		BUG();
 	}
-	if (flags & FAULT_FLAG_ALLOW_RETRY) {
-		if (fault & VM_FAULT_MAJOR)
-			current->maj_flt++;
-		else
-			current->min_flt++;
-		if (fault & VM_FAULT_RETRY) {
-			flags &= ~FAULT_FLAG_ALLOW_RETRY;
-			flags |= FAULT_FLAG_TRIED;
-
-			/* No need to up_read(&mm->mmap_sem) as we would
-			 * have already released it in __lock_page_or_retry
-			 * in mm/filemap.c.
-			 */
-
-			goto retry;
-		}
-	}
-	up_read(&mm->mmap_sem);
-	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
-	if (flags & VM_FAULT_MAJOR)
-		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address);
-	else
-		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address);
+	if (fault & VM_FAULT_RETRY) {
+		flags |= FAULT_FLAG_TRIED;
+
+		/* No need to mmap_read_unlock(mm) as we would
+		 * have already released it in __lock_page_or_retry
+		 * in mm/filemap.c.
+		 */
+
+		goto retry;
+	}
+
+	mmap_read_unlock(mm);
 	return;
 
 	/* Something tried to access memory that isn't in our memory map..
 	 * Fix it, but check if it's kernel or user first..
 	 */
 bad_area:
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
+bad_area_nosemaphore:
 	if (user_mode(regs)) {
-		current->thread.bad_vaddr = address;
-		current->thread.error_code = is_write;
-		force_sig_fault(SIGSEGV, code, (void *) address, current);
+		force_sig_fault(SIGSEGV, code, (void *) address);
 		return;
 	}
 	bad_page_fault(regs, address, SIGSEGV);
@@ -168,7 +210,7 @@
 	 * us unable to handle the page fault gracefully.
 	 */
 out_of_memory:
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 	if (!user_mode(regs))
 		bad_page_fault(regs, address, SIGKILL);
 	else
@@ -176,70 +218,30 @@ out_of_memory:
 	return;
 
 do_sigbus:
-	up_read(&mm->mmap_sem);
+	mmap_read_unlock(mm);
 
 	/* Send a sigbus, regardless of whether we were in kernel
 	 * or user mode.
 	 */
-	current->thread.bad_vaddr = address;
-	force_sig_fault(SIGBUS, BUS_ADRERR, (void *) address, current);
+	force_sig_fault(SIGBUS, BUS_ADRERR, (void *) address);
 
 	/* Kernel mode? Handle exceptions or die */
 	if (!user_mode(regs))
 		bad_page_fault(regs, address, SIGBUS);
 	return;
-
-vmalloc_fault:
-	{
-		/* Synchronize this task's top level page-table
-		 * with the 'reference' page table.
-		 */
-		struct mm_struct *act_mm = current->active_mm;
-		int index = pgd_index(address);
-		pgd_t *pgd, *pgd_k;
-		pmd_t *pmd, *pmd_k;
-		pte_t *pte_k;
-
-		if (act_mm == NULL)
-			goto bad_page_fault;
-
-		pgd = act_mm->pgd + index;
-		pgd_k = init_mm.pgd + index;
-
-		if (!pgd_present(*pgd_k))
-			goto bad_page_fault;
-
-		pgd_val(*pgd) = pgd_val(*pgd_k);
-
-		pmd = pmd_offset(pgd, address);
-		pmd_k = pmd_offset(pgd_k, address);
-		if (!pmd_present(*pmd) || !pmd_present(*pmd_k))
-			goto bad_page_fault;
-
-		pmd_val(*pmd) = pmd_val(*pmd_k);
-		pte_k = pte_offset_kernel(pmd_k, address);
-
-		if (!pte_present(*pte_k))
-			goto bad_page_fault;
-		return;
-	}
-bad_page_fault:
-	bad_page_fault(regs, address, SIGKILL);
-	return;
 }
 
 void
 bad_page_fault(struct pt_regs *regs, unsigned long address, int sig)
 {
-	extern void die(const char*, struct pt_regs*, long);
+	extern void __noreturn die(const char*, struct pt_regs*, long);
 	const struct exception_table_entry *entry;
 
 	/* Are we prepared to handle this kernel fault?  */
 	if ((entry = search_exception_tables(regs->pc)) != NULL) {
 		pr_debug("%s: Exception at pc=%#010lx (%lx)\n",
 			 current->comm, regs->pc, entry->fixup);
-		current->thread.bad_uaddr = address;
 		regs->pc = entry->fixup;
 		return;
 	}
@@ -251,5 +253,4 @@ bad_page_fault(struct pt_regs *regs, unsigned long address, int sig)
 		"address %08lx\n pc = %08lx, ra = %08lx\n",
 		address, regs->pc, regs->areg[0]);
 	die("Oops", regs, sig);
-	do_exit(sig);
 }
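A note on the user-visible side of the signal changes above: force_sig_fault(SIGSEGV, code, (void *) address) (the explicit `current` argument is gone from the new API) still delivers `code` as si_code and the faulting address as si_addr in the user's siginfo_t, with SEGV_MAPERR meaning no mapping exists at the address and SEGV_ACCERR meaning a mapping exists but forbids the access. A minimal userspace sketch that observes both fields — the PROT_NONE mapping and the handler are illustrative only, not part of the patch:

#include <signal.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

static void segv_handler(int sig, siginfo_t *si, void *ctx)
{
	/* si_addr and si_code are exactly what force_sig_fault() filled in.
	 * (snprintf is not strictly async-signal-safe; fine for a demo.) */
	char buf[80];
	int n = snprintf(buf, sizeof(buf), "SIGSEGV at %p, si_code=%s\n",
			 si->si_addr,
			 si->si_code == SEGV_MAPERR ? "SEGV_MAPERR" :
			 si->si_code == SEGV_ACCERR ? "SEGV_ACCERR" : "other");
	(void)sig;
	(void)ctx;
	write(STDERR_FILENO, buf, n);
	_exit(0);
}

int main(void)
{
	struct sigaction sa = { .sa_sigaction = segv_handler,
				.sa_flags = SA_SIGINFO };
	char *p;

	sigaction(SIGSEGV, &sa, NULL);

	/* A mapping that exists but forbids all access: expect SEGV_ACCERR,
	 * the code set at the (former) good_area label. An address inside
	 * no VMA at all would report SEGV_MAPERR instead. */
	p = mmap(NULL, 4096, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED)
		return 1;
	*p = 1;		/* faults; the handler reports the siginfo and exits */
	return 1;
}

Kernel-mode faults never reach this path; they are fixed up through the exception table in bad_page_fault() or end in die().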

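One more behavioural note: dropping the open-coded VM_GROWSDOWN/expand_stack() sequence does not change what user space sees, because lock_mm_and_find_vma() now performs the stack expansion itself under the mmap lock. A quick sketch that exercises that path by touching roughly one fresh stack page per frame; the frame count and the common 8 MiB RLIMIT_STACK default are assumptions for illustration:

#include <stdio.h>

/* Each recursive frame touches a byte in a fresh ~page-sized buffer,
 * so the kernel must repeatedly extend the stack VMA downwards. */
static long grow(int depth)
{
	volatile char pad[4096];

	pad[0] = 1;		/* touch the new stack page */
	if (depth == 0)
		return pad[0];
	return grow(depth - 1) + pad[0];
}

int main(void)
{
	/* ~4 MiB of stack: far beyond what is initially mapped, but
	 * well inside the usual 8 MiB rlimit, so every expansion
	 * should succeed rather than SIGSEGV. */
	printf("%ld\n", grow(1000));
	return 0;
}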