Diffstat (limited to 'arch/arc/mm/fault.c')
-rw-r--r--  arch/arc/mm/fault.c  242
1 file changed, 103 insertions(+), 139 deletions(-)
diff --git a/arch/arc/mm/fault.c b/arch/arc/mm/fault.c
index 0fd1f0d515ff..95119a5e7761 100644
--- a/arch/arc/mm/fault.c
+++ b/arch/arc/mm/fault.c
@@ -1,49 +1,67 @@
+// SPDX-License-Identifier: GPL-2.0-only
/* Page Fault Handling for ARC (TLB Miss / ProtV)
*
* Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#include <linux/signal.h>
#include <linux/interrupt.h>
-#include <linux/sched.h>
+#include <linux/sched/signal.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/uaccess.h>
#include <linux/kdebug.h>
-#include <asm/pgalloc.h>
+#include <linux/perf_event.h>
+#include <linux/mm_types.h>
+#include <asm/entry.h>
#include <asm/mmu.h>
-static int handle_vmalloc_fault(struct mm_struct *mm, unsigned long address)
+/*
+ * kernel virtual address is required to implement vmalloc/pkmap/fixmap
+ * Refer to asm/processor.h for System Memory Map
+ *
+ * It simply copies the PMD entry (pointer to 2nd level page table or hugepage)
+ * from swapper pgdir to task pgdir. The 2nd level table/page is thus shared
+ */
+noinline static int handle_kernel_vaddr_fault(unsigned long address)
{
/*
* Synchronize this task's top level page-table
* with the 'reference' page table.
*/
pgd_t *pgd, *pgd_k;
+ p4d_t *p4d, *p4d_k;
pud_t *pud, *pud_k;
pmd_t *pmd, *pmd_k;
- pgd = pgd_offset_fast(mm, address);
+ pgd = pgd_offset(current->active_mm, address);
pgd_k = pgd_offset_k(address);
- if (!pgd_present(*pgd_k))
+ if (pgd_none(*pgd_k))
goto bad_area;
+ if (!pgd_present(*pgd))
+ set_pgd(pgd, *pgd_k);
- pud = pud_offset(pgd, address);
- pud_k = pud_offset(pgd_k, address);
- if (!pud_present(*pud_k))
+ p4d = p4d_offset(pgd, address);
+ p4d_k = p4d_offset(pgd_k, address);
+ if (p4d_none(*p4d_k))
goto bad_area;
+ if (!p4d_present(*p4d))
+ set_p4d(p4d, *p4d_k);
+
+ pud = pud_offset(p4d, address);
+ pud_k = pud_offset(p4d_k, address);
+ if (pud_none(*pud_k))
+ goto bad_area;
+ if (!pud_present(*pud))
+ set_pud(pud, *pud_k);
pmd = pmd_offset(pud, address);
pmd_k = pmd_offset(pud_k, address);
- if (!pmd_present(*pmd_k))
+ if (pmd_none(*pmd_k))
goto bad_area;
-
- set_pmd(pmd, *pmd_k);
+ if (!pmd_present(*pmd))
+ set_pmd(pmd, *pmd_k);
/* XXX: create the TLB entry here */
return 0;
@@ -52,178 +70,124 @@ bad_area:
return 1;
}
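[Editor's note: the copy-down above is what makes lazily populated kernel mappings work: a vmalloc() in one task's context updates only init_mm's page tables, and the first touch through a stale task pgd faults into handle_kernel_vaddr_fault(), which shares the entry rather than duplicating the lower-level tables. A minimal sketch of how such a fault arises, assuming a kernel-module context; touch_vmalloc_example() and buf are hypothetical names, and cleanup (vfree) is omitted:]

#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/vmalloc.h>

static void *buf;

static int touch_vmalloc_example(void)
{
	/* Populates page tables in init_mm (the "reference" pgd) only. */
	buf = vmalloc(PAGE_SIZE);
	if (!buf)
		return -ENOMEM;

	/*
	 * The first access from a task whose pgd predates the vmalloc
	 * may TLB-miss; the handler above copies the shared kernel
	 * entry from swapper pgdir into the current pgd and resumes.
	 */
	memset(buf, 0, PAGE_SIZE);
	return 0;
}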
-void do_page_fault(struct pt_regs *regs, unsigned long address)
+void do_page_fault(unsigned long address, struct pt_regs *regs)
{
struct vm_area_struct *vma = NULL;
struct task_struct *tsk = current;
struct mm_struct *mm = tsk->mm;
- siginfo_t info;
- int fault, ret;
- int write = regs->ecr_cause & ECR_C_PROTV_STORE; /* ST/EX */
- unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
- (write ? FAULT_FLAG_WRITE : 0);
+ int sig, si_code = SEGV_MAPERR;
+ unsigned int write = 0, exec = 0, mask;
+ vm_fault_t fault = VM_FAULT_SIGSEGV; /* handle_mm_fault() output */
+ unsigned int flags; /* handle_mm_fault() input */
/*
- * We fault-in kernel-space virtual memory on-demand. The
- * 'reference' page table is init_mm.pgd.
- *
* NOTE! We MUST NOT take any locks for this case. We may
* be in an interrupt or a critical region, and should
* only copy the information from the master page table,
* nothing more.
*/
- if (address >= VMALLOC_START && address <= VMALLOC_END) {
- ret = handle_vmalloc_fault(mm, address);
- if (unlikely(ret))
- goto bad_area_nosemaphore;
+ if (address >= VMALLOC_START && !user_mode(regs)) {
+ if (unlikely(handle_kernel_vaddr_fault(address)))
+ goto no_context;
else
return;
}
- info.si_code = SEGV_MAPERR;
-
/*
* If we're in an interrupt or have no user
* context, we must not take the fault..
*/
- if (in_atomic() || !mm)
+ if (faulthandler_disabled() || !mm)
goto no_context;
+ if (regs->ecr.cause & ECR_C_PROTV_STORE) /* ST/EX */
+ write = 1;
+ else if ((regs->ecr.vec == ECR_V_PROTV) &&
+ (regs->ecr.cause == ECR_C_PROTV_INST_FETCH))
+ exec = 1;
+
+ flags = FAULT_FLAG_DEFAULT;
+ if (user_mode(regs))
+ flags |= FAULT_FLAG_USER;
+ if (write)
+ flags |= FAULT_FLAG_WRITE;
+
+ perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
retry:
- down_read(&mm->mmap_sem);
- vma = find_vma(mm, address);
+ vma = lock_mm_and_find_vma(mm, address, regs);
if (!vma)
- goto bad_area;
- if (vma->vm_start <= address)
- goto good_area;
- if (!(vma->vm_flags & VM_GROWSDOWN))
- goto bad_area;
- if (expand_stack(vma, address))
- goto bad_area;
+ goto bad_area_nosemaphore;
/*
- * Ok, we have a good vm_area for this memory access, so
- * we can handle it..
+ * vm_area is good, now check permissions for this memory access
*/
-good_area:
- info.si_code = SEGV_ACCERR;
-
- /* Handle protection violation, execute on heap or stack */
-
- if ((regs->ecr_vec == ECR_V_PROTV) &&
- (regs->ecr_cause == ECR_C_PROTV_INST_FETCH))
+ mask = VM_READ;
+ if (write)
+ mask = VM_WRITE;
+ if (exec)
+ mask = VM_EXEC;
+
+ if (!(vma->vm_flags & mask)) {
+ si_code = SEGV_ACCERR;
goto bad_area;
-
- if (write) {
- if (!(vma->vm_flags & VM_WRITE))
- goto bad_area;
- } else {
- if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
- goto bad_area;
}
-survive:
- /*
- * If for any reason at all we couldn't handle the fault,
- * make sure we exit gracefully rather than endlessly redo
- * the fault.
- */
- fault = handle_mm_fault(mm, vma, address, flags);
+ fault = handle_mm_fault(vma, address, flags, regs);
- /* If Pagefault was interrupted by SIGKILL, exit page fault "early" */
- if (unlikely(fatal_signal_pending(current))) {
- if ((fault & VM_FAULT_ERROR) && !(fault & VM_FAULT_RETRY))
- up_read(&mm->mmap_sem);
- if (user_mode(regs))
- return;
- }
-
- if (likely(!(fault & VM_FAULT_ERROR))) {
- if (flags & FAULT_FLAG_ALLOW_RETRY) {
- /* To avoid updating stats twice for retry case */
- if (fault & VM_FAULT_MAJOR)
- tsk->maj_flt++;
- else
- tsk->min_flt++;
-
- if (fault & VM_FAULT_RETRY) {
- flags &= ~FAULT_FLAG_ALLOW_RETRY;
- flags |= FAULT_FLAG_TRIED;
- goto retry;
- }
- }
-
- /* Fault Handled Gracefully */
- up_read(&mm->mmap_sem);
+ /* Quick path to respond to signals */
+ if (fault_signal_pending(fault, regs)) {
+ if (!user_mode(regs))
+ goto no_context;
return;
}
- /* TBD: switch to pagefault_out_of_memory() */
- if (fault & VM_FAULT_OOM)
- goto out_of_memory;
- else if (fault & VM_FAULT_SIGBUS)
- goto do_sigbus;
-
- /* no man's land */
- BUG();
+ /* The fault is fully completed (including releasing mmap lock) */
+ if (fault & VM_FAULT_COMPLETED)
+ return;
/*
- * Something tried to access memory that isn't in our memory map..
- * Fix it, but check if it's kernel or user first..
+ * Fault retry nuances, mmap_lock already relinquished by core mm
*/
+ if (unlikely(fault & VM_FAULT_RETRY)) {
+ flags |= FAULT_FLAG_TRIED;
+ goto retry;
+ }
+
bad_area:
- up_read(&mm->mmap_sem);
+ mmap_read_unlock(mm);
bad_area_nosemaphore:
- /* User mode accesses just cause a SIGSEGV */
- if (user_mode(regs)) {
- tsk->thread.fault_address = address;
- info.si_signo = SIGSEGV;
- info.si_errno = 0;
- /* info.si_code has been set above */
- info.si_addr = (void __user *)address;
- force_sig_info(SIGSEGV, &info, tsk);
- return;
- }
-
-no_context:
- /* Are we prepared to handle this kernel fault?
- *
- * (The kernel has valid exception-points in the source
- * when it acesses user-memory. When it fails in one
- * of those points, we find it in a table and do a jump
- * to some fixup code that loads an appropriate error
- * code)
+ /*
+ * Major/minor page fault accounting
+ * (in case of retry we only land here once)
*/
- if (fixup_exception(regs))
+ if (likely(!(fault & VM_FAULT_ERROR)))
+ /* Normal return path: fault handled gracefully */
return;
- die("Oops", regs, address);
-
-out_of_memory:
- if (is_global_init(tsk)) {
- yield();
- goto survive;
- }
- up_read(&mm->mmap_sem);
+ if (!user_mode(regs))
+ goto no_context;
- if (user_mode(regs)) {
+ if (fault & VM_FAULT_OOM) {
pagefault_out_of_memory();
return;
}
- goto no_context;
+ if (fault & VM_FAULT_SIGBUS) {
+ sig = SIGBUS;
+ si_code = BUS_ADRERR;
+ } else {
+ sig = SIGSEGV;
+ }
-do_sigbus:
- up_read(&mm->mmap_sem);
+ tsk->thread.fault_address = address;
+ force_sig_fault(sig, si_code, (void __user *)address);
+ return;
- if (!user_mode(regs))
- goto no_context;
+no_context:
+ if (fixup_exception(regs))
+ return;
- tsk->thread.fault_address = address;
- info.si_signo = SIGBUS;
- info.si_errno = 0;
- info.si_code = BUS_ADRERR;
- info.si_addr = (void __user *)address;
- force_sig_info(SIGBUS, &info, tsk);
+ die("Oops", regs, address);
}
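[Editor's note: for reference on the si_code values force_sig_fault() delivers here, the following is a small self-contained userspace program, not part of the patch, that provokes the SEGV_ACCERR case on any POSIX Linux target:]

#include <signal.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

static void segv_handler(int sig, siginfo_t *info, void *ctx)
{
	/* write() is async-signal-safe, unlike printf(). */
	const char *msg = (info->si_code == SEGV_ACCERR)
		? "SEGV_ACCERR: vma exists, permission denied\n"
		: "SEGV_MAPERR: no vma maps the address\n";

	(void)sig;
	(void)ctx;
	write(STDOUT_FILENO, msg, strlen(msg));
	_exit(0);
}

int main(void)
{
	struct sigaction sa = {
		.sa_sigaction = segv_handler,
		.sa_flags = SA_SIGINFO,
	};
	volatile char *p;

	sigaction(SIGSEGV, &sa, NULL);

	/* Read-only anonymous mapping: a store trips the VM_WRITE
	 * check in the fault handler -> si_code = SEGV_ACCERR. */
	p = mmap(NULL, 4096, PROT_READ, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED)
		return 1;
	p[0] = 1;
	return 0;	/* not reached */
}

[Dereferencing an address with no mapping at all (say, a pointer well past the mapping) instead reports SEGV_MAPERR, matching the default si_code in do_page_fault() above.]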