author	SeongJae Park <sj@kernel.org>	2025-04-09 17:00:22 -0700
committer	Andrew Morton <akpm@linux-foundation.org>	2025-05-11 17:48:27 -0700
commit	43c4cfde7e37460482e41dbb6837d4bff290d2cc (patch)
tree	5e96c4d62cb115c0f3be8b2fc399e568cdf3c396 /mm/memory.c
parent	de8efdf8cd273619121cbc2f0f70dddc74bc586e (diff)
mm/madvise: batch tlb flushes for MADV_DONTNEED[_LOCKED]
MADV_DONTNEED[_LOCKED] handling for [process_]madvise() flushes the tlb
for each VMA of each address range.  Update the logic to do the tlb
flushes in a batched way: initialize an mmu_gather object in do_madvise()
and vector_madvise(), the entry-level functions for [process_]madvise(),
and pass it to the per-VMA work via the madvise_behavior struct.  Make
the per-VMA logic not flush the tlb on its own but only save the tlb
entries to the received mmu_gather object.  For this internal change,
make zap_page_range_single_batched() non-static and use it directly from
madvise_dontneed_single_vma().  Finally, have the entry-level functions
flush the tlb entries gathered for the entire user request at once.

Link: https://lkml.kernel.org/r/20250410000022.1901-5-sj@kernel.org
Signed-off-by: SeongJae Park <sj@kernel.org>
Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
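The batched flow described above follows the kernel's usual mmu_gather
pattern: one gather object per user request, a single flush at the end.
A minimal sketch of that shape, using tlb_gather_mmu()/tlb_finish_mmu()
and the zap_page_range_single_batched() prototype shown in the diff
below; the function name, range clamping, and VMA walk here are
illustrative, not the verbatim madvise code:

#include <linux/mm.h>		/* VMA_ITERATOR, for_each_vma_range() */
#include <linux/minmax.h>	/* min(), max() */
#include <asm/tlb.h>		/* struct mmu_gather, tlb_gather_mmu(), tlb_finish_mmu() */

/*
 * Sketch of the batched-flush shape described above.  Illustrative only:
 * madvise's real VMA walk, range splitting, error handling, and mmap
 * locking are elided, and this function name is made up.
 */
static void madvise_dontneed_batched_sketch(struct mm_struct *mm,
		unsigned long start, unsigned long end)
{
	struct mmu_gather tlb;
	struct vm_area_struct *vma;
	VMA_ITERATOR(vmi, mm, start);

	/* One mmu_gather for the whole user request. */
	tlb_gather_mmu(&tlb, mm);

	for_each_vma_range(vmi, vma, end) {
		unsigned long s = max(start, vma->vm_start);
		unsigned long e = min(end, vma->vm_end);

		/* Per-VMA work only accumulates entries; no flush here. */
		zap_page_range_single_batched(&tlb, vma, s, e - s, NULL);
	}

	/* A single flush covers everything gathered above. */
	tlb_finish_mmu(&tlb);
}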
Diffstat (limited to 'mm/memory.c')
-rw-r--r--	mm/memory.c	4
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index 33d34722e339..71c255f3fdcc 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1998,7 +1998,7 @@ void unmap_vmas(struct mmu_gather *tlb, struct ma_state *mas,
 	mmu_notifier_invalidate_range_end(&range);
 }
 
-/*
+/**
  * zap_page_range_single_batched - remove user pages in a given range
  * @tlb: pointer to the caller's struct mmu_gather
  * @vma: vm_area_struct holding the applicable pages
@@ -2009,7 +2009,7 @@ void unmap_vmas(struct mmu_gather *tlb, struct ma_state *mas,
  * @tlb shouldn't be NULL.  The range must fit into one VMA.  If @vma is for
  * hugetlb, @tlb is flushed and re-initialized by this function.
  */
-static void zap_page_range_single_batched(struct mmu_gather *tlb,
+void zap_page_range_single_batched(struct mmu_gather *tlb,
 		struct vm_area_struct *vma, unsigned long address,
 		unsigned long size, struct zap_details *details)
 {
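With the static dropped, the madvise side can call this helper directly
with the caller-owned mmu_gather.  A hedged sketch of the call site the
message names, assuming struct madvise_behavior carries the gather
object in a field called tlb (that field name is an assumption, not
taken from this page):

/*
 * Sketch only: the madvise_behavior layout, including the ->tlb field,
 * is assumed rather than quoted from this commit.
 */
static long madvise_dontneed_single_vma(struct madvise_behavior *behavior,
		struct vm_area_struct *vma,
		unsigned long start, unsigned long end)
{
	/*
	 * Gather entries into the mmu_gather initialized by the
	 * entry-level functions; the flush happens there, once,
	 * after all VMAs have been processed.
	 */
	zap_page_range_single_batched(behavior->tlb, vma, start,
			end - start, NULL);
	return 0;
}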