author		Ben Hutchings <ben@decadent.org.uk>	2023-06-22 21:24:30 +0200
committer	Linus Torvalds <torvalds@linux-foundation.org>	2023-06-24 14:12:58 -0700
commit		8b35ca3e45e35a26a21427f35d4093606e93ad0a
tree		a5e755ba10e7ebfc8770ae4f247aaad5cf831e58
parent		7267ef7b0b77f4ed23b7b3c87d8eca7bd9c2d007
arm/mm: Convert to using lock_mm_and_find_vma()
arm has an additional check for address < FIRST_USER_ADDRESS before
expanding the stack. Since FIRST_USER_ADDRESS is defined everywhere
(generally as 0), move that check to the generic expand_downwards().

Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
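For illustration, the relocated check lands in the generic stack-expansion path roughly as below (a minimal sketch of the expand_downwards() guard in mm/mmap.c, assuming the new test is simply folded into the existing mmap_min_addr check; not the verbatim upstream hunk):

	/* mm/mmap.c -- sketch: refuse to grow a stack below the first user address */
	int expand_downwards(struct vm_area_struct *vma, unsigned long address)
	{
		address &= PAGE_MASK;
		if (address < mmap_min_addr || address < FIRST_USER_ADDRESS)
			return -EPERM;
		/* ... existing stack-guard-gap checks and the actual expansion follow ... */
	}

Since FIRST_USER_ADDRESS is 0 on most architectures, the extra comparison is a no-op there; only arm (and any other architecture with a non-zero value) keeps its behaviour.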
Diffstat (limited to 'arch/arm/mm')
-rw-r--r--	arch/arm/mm/fault.c	63
1 file changed, 14 insertions(+), 49 deletions(-)
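For context, the generic helper being adopted returns the faulting VMA with the mmap read lock held, or NULL with the lock already released; that contract is why do_page_fault() below can jump straight to bad_area on NULL. The sketch below condenses the CONFIG_LOCK_MM_AND_FIND_VMA code in mm/memory.c (locking and error paths simplified; get_mmap_lock_carefully() is shown only by its role):

	struct vm_area_struct *lock_mm_and_find_vma(struct mm_struct *mm,
						    unsigned long addr,
						    struct pt_regs *regs)
	{
		struct vm_area_struct *vma;

		/* Trylock first; fall back to a blocking read lock only when
		 * the fault did not come from kernel code outside the
		 * exception tables (the check arm used to open-code). */
		if (!get_mmap_lock_carefully(mm, regs))
			return NULL;

		vma = find_vma(mm, addr);
		if (likely(vma && vma->vm_start <= addr))
			return vma;			/* lock still held */

		/* addr is below the VMA: try to expand a grows-down stack. */
		if (vma && (vma->vm_flags & VM_GROWSDOWN) &&
		    !expand_stack(vma, addr))
			return vma;

		mmap_read_unlock(mm);			/* NULL => lock dropped */
		return NULL;
	}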
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index 2418f1efabd8..0860eeba8bd3 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -232,37 +232,11 @@ static inline bool is_permission_fault(unsigned int fsr)
 	return false;
 }
 
-static vm_fault_t __kprobes
-__do_page_fault(struct mm_struct *mm, unsigned long addr, unsigned int flags,
-		unsigned long vma_flags, struct pt_regs *regs)
-{
-	struct vm_area_struct *vma = find_vma(mm, addr);
-	if (unlikely(!vma))
-		return VM_FAULT_BADMAP;
-
-	if (unlikely(vma->vm_start > addr)) {
-		if (!(vma->vm_flags & VM_GROWSDOWN))
-			return VM_FAULT_BADMAP;
-		if (addr < FIRST_USER_ADDRESS)
-			return VM_FAULT_BADMAP;
-		if (expand_stack(vma, addr))
-			return VM_FAULT_BADMAP;
-	}
-
-	/*
-	 * ok, we have a good vm_area for this memory access, check the
-	 * permissions on the VMA allow for the fault which occurred.
-	 */
-	if (!(vma->vm_flags & vma_flags))
-		return VM_FAULT_BADACCESS;
-
-	return handle_mm_fault(vma, addr & PAGE_MASK, flags, regs);
-}
-
 static int __kprobes
 do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 {
 	struct mm_struct *mm = current->mm;
+	struct vm_area_struct *vma;
 	int sig, code;
 	vm_fault_t fault;
 	unsigned int flags = FAULT_FLAG_DEFAULT;
@@ -301,31 +275,21 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 
 	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);
 
-	/*
-	 * As per x86, we may deadlock here. However, since the kernel only
-	 * validly references user space from well defined areas of the code,
-	 * we can bug out early if this is from code which shouldn't.
-	 */
-	if (!mmap_read_trylock(mm)) {
-		if (!user_mode(regs) && !search_exception_tables(regs->ARM_pc))
-			goto no_context;
 retry:
-		mmap_read_lock(mm);
-	} else {
-		/*
-		 * The above down_read_trylock() might have succeeded in
-		 * which case, we'll have missed the might_sleep() from
-		 * down_read()
-		 */
-		might_sleep();
-#ifdef CONFIG_DEBUG_VM
-		if (!user_mode(regs) &&
-		    !search_exception_tables(regs->ARM_pc))
-			goto no_context;
-#endif
+	vma = lock_mm_and_find_vma(mm, addr, regs);
+	if (unlikely(!vma)) {
+		fault = VM_FAULT_BADMAP;
+		goto bad_area;
 	}
 
-	fault = __do_page_fault(mm, addr, flags, vm_flags, regs);
+	/*
+	 * ok, we have a good vm_area for this memory access, check the
+	 * permissions on the VMA allow for the fault which occurred.
+	 */
+	if (!(vma->vm_flags & vm_flags))
+		fault = VM_FAULT_BADACCESS;
+	else
+		fault = handle_mm_fault(vma, addr & PAGE_MASK, flags, regs);
 
 	/* If we need to retry but a fatal signal is pending, handle the
 	 * signal first. We do not need to release the mmap_lock because
@@ -356,6 +320,7 @@ retry:
 	if (likely(!(fault & (VM_FAULT_ERROR | VM_FAULT_BADMAP | VM_FAULT_BADACCESS))))
 		return 0;
 
+bad_area:
 	/*
 	 * If we are in kernel mode at this point, we
 	 * have no context to handle this fault with.