author	Matthew Wilcox (Oracle) <willy@infradead.org>	2023-07-24 19:54:08 +0100
committer	Andrew Morton <akpm@linux-foundation.org>	2023-08-18 10:12:52 -0700
commit	f5617ffeb450f84c57f7eba1a3524a29955d42b7 (patch)
tree	74d1b0c1d61e71d0500fd658645b662277a8bc6d /mm/memory.c
parent	61a4b8d32025dcabcd78994f887a4b9dff912cf0 (diff)
mm: run the fault-around code under the VMA lock
The map_pages fs method should be safe to run under the VMA lock instead of the mmap lock.  This should have a measurable reduction in contention on the mmap lock.

Link: https://lkml.kernel.org/r/20230724185410.1124082-9-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Suren Baghdasaryan <surenb@google.com>
Cc: Arjun Roy <arjunroy@google.com>
Cc: Eric Dumazet <edumazet@google.com>
Cc: Punit Agrawal <punit.agrawal@bytedance.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
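For context, the "map_pages fs method" mentioned above is the ->map_pages handler in a filesystem's vm_operations_struct. Most page-cache-backed filesystems wire it up to the generic filemap helpers, roughly as in the illustrative fragment below (modelled on generic_file_vm_ops in mm/filemap.c; the struct name here is made up and the exact contents vary by kernel version):

/*
 * Illustrative kernel-style fragment, not taken from this patch: it only
 * shows which hooks the fault path dispatches to. ->map_pages() does the
 * read-only fault-around that this commit now runs under the VMA lock;
 * ->fault() is the slower path that still falls back to the mmap lock.
 */
static const struct vm_operations_struct example_file_vm_ops = {
	.fault		= filemap_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= filemap_page_mkwrite,
};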
Diffstat (limited to 'mm/memory.c')
-rw-r--r--	mm/memory.c	10
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index 23a20b7a483c..52235aa3d665 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -4533,11 +4533,6 @@ static vm_fault_t do_read_fault(struct vm_fault *vmf)
 	vm_fault_t ret = 0;
 	struct folio *folio;
 
-	if (vmf->flags & FAULT_FLAG_VMA_LOCK) {
-		vma_end_read(vmf->vma);
-		return VM_FAULT_RETRY;
-	}
-
 	/*
 	 * Let's call ->map_pages() first and use ->fault() as fallback
 	 * if page by the offset is not ready to be mapped (cold cache or
@@ -4549,6 +4544,11 @@ static vm_fault_t do_read_fault(struct vm_fault *vmf)
 			return ret;
 	}
 
+	if (vmf->flags & FAULT_FLAG_VMA_LOCK) {
+		vma_end_read(vmf->vma);
+		return VM_FAULT_RETRY;
+	}
+
 	ret = __do_fault(vmf);
 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
 		return ret;
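To make the resulting control flow concrete, here is a small, self-contained userspace model of do_read_fault() after this patch. Everything prefixed with fake_ or model_ is a stand-in, not a kernel API; the only point is the ordering: a read fault that ->map_pages() can satisfy now completes entirely under the per-VMA lock, and only the __do_fault() slow path still returns VM_FAULT_RETRY (after vma_end_read() in the real code) so the caller can retry under the mmap lock.

#include <stdbool.h>
#include <stdio.h>

#define FAULT_FLAG_VMA_LOCK	0x1	/* stand-in flag value, not the kernel's */
#define VM_FAULT_RETRY		0x2	/* stand-in return code */

/*
 * Stand-in for the ->map_pages() fault-around attempt: returns a nonzero
 * result (like VM_FAULT_NOPAGE in the kernel) when the page was already
 * cached and could be mapped without taking the full fault path.
 */
static int fake_map_pages(bool page_cached)
{
	return page_cached ? 1 : 0;
}

/* Stand-in for __do_fault(), the slow path that may need the mmap lock. */
static int fake_do_fault(void)
{
	return 0;	/* 0 == fault handled */
}

static int model_do_read_fault(unsigned int flags, bool page_cached)
{
	int ret;

	/* Fault-around first: after this patch it runs under the VMA lock. */
	ret = fake_map_pages(page_cached);
	if (ret)
		return ret;

	/*
	 * Only the full ->fault() path still bails out under the VMA lock;
	 * the real code calls vma_end_read() before returning.
	 */
	if (flags & FAULT_FLAG_VMA_LOCK)
		return VM_FAULT_RETRY;

	return fake_do_fault();
}

int main(void)
{
	printf("cached page under VMA lock -> %d (handled by fault-around)\n",
	       model_do_read_fault(FAULT_FLAG_VMA_LOCK, true));
	printf("cold page under VMA lock   -> %d (VM_FAULT_RETRY, retry with mmap lock)\n",
	       model_do_read_fault(FAULT_FLAG_VMA_LOCK, false));
	return 0;
}

Before the patch, the FAULT_FLAG_VMA_LOCK check sat above the fault-around attempt, so even faults that ->map_pages() could have resolved were forced into the VM_FAULT_RETRY path and re-run under the mmap lock; moving the check below the fault-around call is what removes that contention.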