diff options
Diffstat (limited to 'arch/xtensa/mm/fault.c')
| -rw-r--r-- | arch/xtensa/mm/fault.c | 135 |
1 file changed, 66 insertions, 69 deletions
diff --git a/arch/xtensa/mm/fault.c b/arch/xtensa/mm/fault.c index 06d0973a0d74..16e11b6f6f78 100644 --- a/arch/xtensa/mm/fault.c +++ b/arch/xtensa/mm/fault.c @@ -20,10 +20,63 @@ #include <asm/mmu_context.h> #include <asm/cacheflush.h> #include <asm/hardirq.h> +#include <asm/traps.h> -DEFINE_PER_CPU(unsigned long, asid_cache) = ASID_USER_FIRST; void bad_page_fault(struct pt_regs*, unsigned long, int); +static void vmalloc_fault(struct pt_regs *regs, unsigned int address) +{ +#ifdef CONFIG_MMU + /* Synchronize this task's top level page-table + * with the 'reference' page table. + */ + struct mm_struct *act_mm = current->active_mm; + int index = pgd_index(address); + pgd_t *pgd, *pgd_k; + p4d_t *p4d, *p4d_k; + pud_t *pud, *pud_k; + pmd_t *pmd, *pmd_k; + pte_t *pte_k; + + if (act_mm == NULL) + goto bad_page_fault; + + pgd = act_mm->pgd + index; + pgd_k = init_mm.pgd + index; + + if (!pgd_present(*pgd_k)) + goto bad_page_fault; + + pgd_val(*pgd) = pgd_val(*pgd_k); + + p4d = p4d_offset(pgd, address); + p4d_k = p4d_offset(pgd_k, address); + if (!p4d_present(*p4d) || !p4d_present(*p4d_k)) + goto bad_page_fault; + + pud = pud_offset(p4d, address); + pud_k = pud_offset(p4d_k, address); + if (!pud_present(*pud) || !pud_present(*pud_k)) + goto bad_page_fault; + + pmd = pmd_offset(pud, address); + pmd_k = pmd_offset(pud_k, address); + if (!pmd_present(*pmd) || !pmd_present(*pmd_k)) + goto bad_page_fault; + + pmd_val(*pmd) = pmd_val(*pmd_k); + pte_k = pte_offset_kernel(pmd_k, address); + + if (!pte_present(*pte_k)) + goto bad_page_fault; + return; + +bad_page_fault: + bad_page_fault(regs, address, SIGKILL); +#else + WARN_ONCE(1, "%s in noMMU configuration\n", __func__); +#endif +} /* * This routine handles page faults. It determines the address, * and the problem, and then passes it off to one of the appropriate @@ -49,8 +102,10 @@ void do_page_fault(struct pt_regs *regs) /* We fault-in kernel-space virtual memory on-demand. The * 'reference' page table is init_mm.pgd. 
*/ - if (address >= TASK_SIZE && !user_mode(regs)) - goto vmalloc_fault; + if (address >= TASK_SIZE && !user_mode(regs)) { + vmalloc_fault(regs, address); + return; + } /* If we're in an interrupt or have no user * context, we must not take the fault.. @@ -76,23 +131,14 @@ void do_page_fault(struct pt_regs *regs) perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address); retry: - mmap_read_lock(mm); - vma = find_vma(mm, address); - + vma = lock_mm_and_find_vma(mm, address, regs); if (!vma) - goto bad_area; - if (vma->vm_start <= address) - goto good_area; - if (!(vma->vm_flags & VM_GROWSDOWN)) - goto bad_area; - if (expand_stack(vma, address)) - goto bad_area; + goto bad_area_nosemaphore; /* Ok, we have a good vm_area for this memory access, so * we can handle it.. */ -good_area: code = SEGV_ACCERR; if (is_write) { @@ -114,10 +160,14 @@ good_area: if (fault_signal_pending(fault, regs)) { if (!user_mode(regs)) - goto bad_page_fault; + bad_page_fault(regs, address, SIGKILL); return; } + /* The fault is fully completed (including releasing mmap lock) */ + if (fault & VM_FAULT_COMPLETED) + return; + if (unlikely(fault & VM_FAULT_ERROR)) { if (fault & VM_FAULT_OOM) goto out_of_memory; @@ -147,9 +197,8 @@ good_area: */ bad_area: mmap_read_unlock(mm); +bad_area_nosemaphore: if (user_mode(regs)) { - current->thread.bad_vaddr = address; - current->thread.error_code = is_write; force_sig_fault(SIGSEGV, code, (void *) address); return; } @@ -174,63 +223,12 @@ do_sigbus: /* Send a sigbus, regardless of whether we were in kernel * or user mode. */ - current->thread.bad_vaddr = address; force_sig_fault(SIGBUS, BUS_ADRERR, (void *) address); /* Kernel mode? Handle exceptions or die */ if (!user_mode(regs)) bad_page_fault(regs, address, SIGBUS); return; - -vmalloc_fault: - { - /* Synchronize this task's top level page-table - * with the 'reference' page table. 
- */ - struct mm_struct *act_mm = current->active_mm; - int index = pgd_index(address); - pgd_t *pgd, *pgd_k; - p4d_t *p4d, *p4d_k; - pud_t *pud, *pud_k; - pmd_t *pmd, *pmd_k; - pte_t *pte_k; - - if (act_mm == NULL) - goto bad_page_fault; - - pgd = act_mm->pgd + index; - pgd_k = init_mm.pgd + index; - - if (!pgd_present(*pgd_k)) - goto bad_page_fault; - - pgd_val(*pgd) = pgd_val(*pgd_k); - - p4d = p4d_offset(pgd, address); - p4d_k = p4d_offset(pgd_k, address); - if (!p4d_present(*p4d) || !p4d_present(*p4d_k)) - goto bad_page_fault; - - pud = pud_offset(p4d, address); - pud_k = pud_offset(p4d_k, address); - if (!pud_present(*pud) || !pud_present(*pud_k)) - goto bad_page_fault; - - pmd = pmd_offset(pud, address); - pmd_k = pmd_offset(pud_k, address); - if (!pmd_present(*pmd) || !pmd_present(*pmd_k)) - goto bad_page_fault; - - pmd_val(*pmd) = pmd_val(*pmd_k); - pte_k = pte_offset_kernel(pmd_k, address); - - if (!pte_present(*pte_k)) - goto bad_page_fault; - return; - } -bad_page_fault: - bad_page_fault(regs, address, SIGKILL); - return; } @@ -244,7 +242,6 @@ bad_page_fault(struct pt_regs *regs, unsigned long address, int sig) if ((entry = search_exception_tables(regs->pc)) != NULL) { pr_debug("%s: Exception at pc=%#010lx (%lx)\n", current->comm, regs->pc, entry->fixup); - current->thread.bad_uaddr = address; regs->pc = entry->fixup; return; } |
