Diffstat (limited to 'mm/memory.c')
-rw-r--r--	mm/memory.c	20
1 file changed, 15 insertions, 5 deletions
diff --git a/mm/memory.c b/mm/memory.c
index d947d8d9e891..23a20b7a483c 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -4533,6 +4533,11 @@ static vm_fault_t do_read_fault(struct vm_fault *vmf)
 	vm_fault_t ret = 0;
 	struct folio *folio;
 
+	if (vmf->flags & FAULT_FLAG_VMA_LOCK) {
+		vma_end_read(vmf->vma);
+		return VM_FAULT_RETRY;
+	}
+
 	/*
 	 * Let's call ->map_pages() first and use ->fault() as fallback
 	 * if page by the offset is not ready to be mapped (cold cache or
@@ -4561,6 +4566,11 @@ static vm_fault_t do_cow_fault(struct vm_fault *vmf)
 	struct vm_area_struct *vma = vmf->vma;
 	vm_fault_t ret;
 
+	if (vmf->flags & FAULT_FLAG_VMA_LOCK) {
+		vma_end_read(vma);
+		return VM_FAULT_RETRY;
+	}
+
 	if (unlikely(anon_vma_prepare(vma)))
 		return VM_FAULT_OOM;
 
@@ -4601,6 +4611,11 @@ static vm_fault_t do_shared_fault(struct vm_fault *vmf)
 	vm_fault_t ret, tmp;
 	struct folio *folio;
 
+	if (vmf->flags & FAULT_FLAG_VMA_LOCK) {
+		vma_end_read(vma);
+		return VM_FAULT_RETRY;
+	}
+
 	ret = __do_fault(vmf);
 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
 		return ret;
@@ -4647,11 +4662,6 @@ static vm_fault_t do_fault(struct vm_fault *vmf)
 	struct mm_struct *vm_mm = vma->vm_mm;
 	vm_fault_t ret;
 
-	if (vmf->flags & FAULT_FLAG_VMA_LOCK){
-		vma_end_read(vma);
-		return VM_FAULT_RETRY;
-	}
-
 	/*
 	 * The VMA was not fully populated on mmap() or missing VM_DONTEXPAND
 	 */
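
Note: below is a minimal userspace sketch of the bail-out pattern this patch pushes from do_fault() down into do_read_fault(), do_cow_fault() and do_shared_fault(): if the fault is being handled under the per-VMA read lock, drop that lock and return a retry result so the caller falls back to the mmap-lock path. The struct names, the fault_ctx type and the pthread rwlock stand-in are illustrative only, not kernel APIs.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

enum fault_result { FAULT_OK, FAULT_RETRY };

struct vma {
	pthread_rwlock_t lock;		/* stands in for the per-VMA lock */
};

struct fault_ctx {
	struct vma *vma;
	bool vma_locked;		/* stands in for FAULT_FLAG_VMA_LOCK */
};

static void vma_end_read_stub(struct vma *vma)
{
	pthread_rwlock_unlock(&vma->lock);
}

/* Analogue of the prologue added to do_read_fault() and friends. */
static enum fault_result handle_file_fault(struct fault_ctx *ctx)
{
	if (ctx->vma_locked) {
		vma_end_read_stub(ctx->vma);
		return FAULT_RETRY;	/* caller retries under the heavier lock */
	}
	/* ... slow path that may sleep on file I/O would go here ... */
	return FAULT_OK;
}

int main(void)
{
	struct vma vma;
	struct fault_ctx ctx = { .vma = &vma, .vma_locked = true };

	pthread_rwlock_init(&vma.lock, NULL);
	pthread_rwlock_rdlock(&vma.lock);

	if (handle_file_fault(&ctx) == FAULT_RETRY)
		printf("VMA-lock path bailed out, retry under mmap lock\n");

	pthread_rwlock_destroy(&vma.lock);
	return 0;
}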