author	Jisheng Zhang <jszhang@kernel.org>	2023-05-24 00:59:42 +0800
committer	Palmer Dabbelt <palmer@rivosinc.com>	2023-06-20 08:10:13 -0700
commit	648321fa0d970c04b4327ac1a053abf43d285931 (patch)
tree	b3b71a1f090d3fc8fcce526dcd20ecad301ef894 /arch/riscv/mm/fault.c
parent	7d3332be011e4ed061c1403b30b5e54ebccb4fa2 (diff)
riscv: mm: try VMA lock-based page fault handling first
Attempt VMA lock-based page fault handling first, and fall back to the
existing mmap_lock-based handling if that fails.

A simple run of the ebizzy benchmark on a Lichee Pi 4A shows that
PER_VMA_LOCK can improve the ebizzy result by about 32.68%. In theory,
the improvement grows with the number of CPUs, but I don't have any HW
platform with more than 4 CPUs.

This is the riscv variant of "x86/mm: try VMA lock-based page fault
handling first".

Signed-off-by: Jisheng Zhang <jszhang@kernel.org>
Reviewed-by: Guo Ren <guoren@kernel.org>
Reviewed-by: Suren Baghdasaryan <surenb@google.com>
Link: https://lore.kernel.org/r/20230523165942.2630-1-jszhang@kernel.org
Signed-off-by: Palmer Dabbelt <palmer@rivosinc.com>
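At a glance, the fast path this patch adds works as follows. This is an annotated outline of the first hunk in the diff below; the comments are editorial, the signal-pending handling is abridged, and the diff itself is the authoritative version:

#ifdef CONFIG_PER_VMA_LOCK
	/* Only user-mode faults try the lockless fast path. */
	if (!(flags & FAULT_FLAG_USER))
		goto lock_mmap;

	/* Find and read-lock the VMA under RCU, without taking mmap_lock. */
	vma = lock_vma_under_rcu(mm, addr);
	if (!vma)
		goto lock_mmap;

	/* The faulting access must still be permitted for this VMA. */
	if (unlikely(access_error(cause, vma))) {
		vma_end_read(vma);
		goto lock_mmap;
	}

	/* Handle the fault while holding only the per-VMA read lock. */
	fault = handle_mm_fault(vma, addr, flags | FAULT_FLAG_VMA_LOCK, regs);
	vma_end_read(vma);

	if (!(fault & VM_FAULT_RETRY)) {
		count_vm_vma_lock_event(VMA_LOCK_SUCCESS);
		goto done;	/* fast path handled the fault */
	}
	/* The fast path could not finish; fall back to the mmap_lock path. */
	count_vm_vma_lock_event(VMA_LOCK_RETRY);
lock_mmap:
#endif /* CONFIG_PER_VMA_LOCK */

Kernel-mode faults skip the fast path entirely and, like any fast-path failure, fall back to the existing mmap_lock-based slow path under the retry: label, so behaviour is unchanged whenever the per-VMA lock cannot be used. The VMA_LOCK_SUCCESS/VMA_LOCK_RETRY counters compile away unless the kernel's per-VMA-lock statistics option is enabled, in which case they should show up in /proc/vmstat.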
Diffstat (limited to 'arch/riscv/mm/fault.c')
-rw-r--r--	arch/riscv/mm/fault.c	33
1 file changed, 33 insertions, 0 deletions
diff --git a/arch/riscv/mm/fault.c b/arch/riscv/mm/fault.c
index b023fb311e28..e52ed89a0cdb 100644
--- a/arch/riscv/mm/fault.c
+++ b/arch/riscv/mm/fault.c
@@ -274,6 +274,36 @@ void handle_page_fault(struct pt_regs *regs)
 		flags |= FAULT_FLAG_WRITE;
 	else if (cause == EXC_INST_PAGE_FAULT)
 		flags |= FAULT_FLAG_INSTRUCTION;
+#ifdef CONFIG_PER_VMA_LOCK
+	if (!(flags & FAULT_FLAG_USER))
+		goto lock_mmap;
+
+	vma = lock_vma_under_rcu(mm, addr);
+	if (!vma)
+		goto lock_mmap;
+
+	if (unlikely(access_error(cause, vma))) {
+		vma_end_read(vma);
+		goto lock_mmap;
+	}
+
+	fault = handle_mm_fault(vma, addr, flags | FAULT_FLAG_VMA_LOCK, regs);
+	vma_end_read(vma);
+
+	if (!(fault & VM_FAULT_RETRY)) {
+		count_vm_vma_lock_event(VMA_LOCK_SUCCESS);
+		goto done;
+	}
+	count_vm_vma_lock_event(VMA_LOCK_RETRY);
+
+	if (fault_signal_pending(fault, regs)) {
+		if (!user_mode(regs))
+			no_context(regs, addr);
+		return;
+	}
+lock_mmap:
+#endif /* CONFIG_PER_VMA_LOCK */
+
 retry:
 	mmap_read_lock(mm);
 	vma = find_vma(mm, addr);
@@ -343,6 +373,9 @@ good_area:
 
 	mmap_read_unlock(mm);
 
+#ifdef CONFIG_PER_VMA_LOCK
+done:
+#endif
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		tsk->thread.bad_cause = cause;
 		mm_fault_error(regs, addr, fault);