Diffstat (limited to 'arch/mips/mm/fault.c')
-rw-r--r--  arch/mips/mm/fault.c  46
1 file changed, 22 insertions(+), 24 deletions(-)
diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
index e7abda9c013f..37fedeaca2e9 100644
--- a/arch/mips/mm/fault.c
+++ b/arch/mips/mm/fault.c
@@ -26,6 +26,7 @@
#include <asm/mmu_context.h>
#include <asm/ptrace.h>
#include <asm/highmem.h> /* For VMALLOC_END */
+#include <asm/traps.h>
#include <linux/kdebug.h>
int show_unhandled_signals = 1;
@@ -35,7 +36,7 @@ int show_unhandled_signals = 1;
* and the problem, and then passes it off to one of the appropriate
* routines.
*/
-static void __kprobes __do_page_fault(struct pt_regs *regs, unsigned long write,
+static void __do_page_fault(struct pt_regs *regs, unsigned long write,
unsigned long address)
{
struct vm_area_struct * vma = NULL;
@@ -82,8 +83,8 @@ static void __kprobes __do_page_fault(struct pt_regs *regs, unsigned long write,
if (unlikely(address >= VMALLOC_START && address <= VMALLOC_END))
goto VMALLOC_FAULT_TARGET;
-#ifdef MODULE_START
- if (unlikely(address >= MODULE_START && address < MODULE_END))
+#ifdef MODULES_VADDR
+ if (unlikely(address >= MODULES_VADDR && address < MODULES_END))
goto VMALLOC_FAULT_TARGET;
#endif
@@ -99,21 +100,13 @@ static void __kprobes __do_page_fault(struct pt_regs *regs, unsigned long write,
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
retry:
- mmap_read_lock(mm);
- vma = find_vma(mm, address);
+ vma = lock_mm_and_find_vma(mm, address, regs);
if (!vma)
- goto bad_area;
- if (vma->vm_start <= address)
- goto good_area;
- if (!(vma->vm_flags & VM_GROWSDOWN))
- goto bad_area;
- if (expand_stack(vma, address))
- goto bad_area;
+ goto bad_area_nosemaphore;
/*
* Ok, we have a good vm_area for this memory access, so
* we can handle it..
*/
-good_area:
si_code = SEGV_ACCERR;
if (write) {
@@ -162,6 +155,10 @@ good_area:
return;
}
+ /* The fault is fully completed (including releasing mmap lock) */
+ if (fault & VM_FAULT_COMPLETED)
+ return;
+
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
@@ -171,18 +168,17 @@ good_area:
goto do_sigbus;
BUG();
}
- if (flags & FAULT_FLAG_ALLOW_RETRY) {
- if (fault & VM_FAULT_RETRY) {
- flags |= FAULT_FLAG_TRIED;
- /*
- * No need to mmap_read_unlock(mm) as we would
- * have already released it in __lock_page_or_retry
- * in mm/filemap.c.
- */
+ if (fault & VM_FAULT_RETRY) {
+ flags |= FAULT_FLAG_TRIED;
- goto retry;
- }
+ /*
+ * No need to mmap_read_unlock(mm) as we would
+ * have already released it in __lock_page_or_retry
+ * in mm/filemap.c.
+ */
+
+ goto retry;
}
mmap_read_unlock(mm);
@@ -323,8 +319,9 @@ vmalloc_fault:
}
#endif
}
+NOKPROBE_SYMBOL(__do_page_fault);
-asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
+asmlinkage void do_page_fault(struct pt_regs *regs,
unsigned long write, unsigned long address)
{
enum ctx_state prev_state;
@@ -333,3 +330,4 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
__do_page_fault(regs, write, address);
exception_exit(prev_state);
}
+NOKPROBE_SYMBOL(do_page_fault);
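
For context, a condensed sketch of the fault path this diff converts to. It is illustrative only: the function name sketch_do_page_fault is made up, and the no_context/bad-area error handling, perf accounting, kernel-mode vmalloc checks and signal delivery of the real MIPS handler are all omitted. The helpers shown (lock_mm_and_find_vma(), handle_mm_fault(), mmap_read_unlock()) are the generic <linux/mm.h> ones the diff switches to.

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/ptrace.h>

static void sketch_do_page_fault(struct pt_regs *regs, unsigned long write,
                                 unsigned long address)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        unsigned int flags = FAULT_FLAG_DEFAULT;
        vm_fault_t fault;

        if (write)
                flags |= FAULT_FLAG_WRITE;

retry:
        /*
         * Replaces the open-coded mmap_read_lock()/find_vma()/expand_stack()
         * sequence: the helper takes the mmap lock, looks up the VMA and
         * grows the stack if needed; on failure it returns NULL with the
         * lock already dropped (hence the bad_area_nosemaphore label in
         * the real handler).
         */
        vma = lock_mm_and_find_vma(mm, address, regs);
        if (!vma)
                return;

        fault = handle_mm_fault(vma, address, flags, regs);

        /* The fault is fully completed and the mmap lock already released. */
        if (fault & VM_FAULT_COMPLETED)
                return;

        if (fault & VM_FAULT_RETRY) {
                /*
                 * The core dropped the mmap lock before returning, so no
                 * unlock is needed here; record that we already tried once
                 * and retry.
                 */
                flags |= FAULT_FLAG_TRIED;
                goto retry;
        }

        mmap_read_unlock(mm);
}

The kprobes change follows the same pattern used elsewhere in the kernel: the __kprobes section attribute is dropped in favour of NOKPROBE_SYMBOL(), which blacklists the functions for kprobes without moving them into a separate text section.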