author     Matthew Wilcox (Oracle) <willy@infradead.org>    2023-01-16 19:28:25 +0000
committer  Andrew Morton <akpm@linux-foundation.org>        2023-02-02 22:33:20 -0800
commit     7efecffb8e7968c4a6c53177b0053ca4765fe233 (patch)
tree       1b24f9d3ced3869c347ef1f83851fa1b0089376d /mm
parent     90c9d13a47d45f2f16530c4d62af2fa4d74dfd16 (diff)
mm: remove mlock_vma_page()
All callers now have a folio and can call mlock_vma_folio().  Update the
documentation to refer to mlock_vma_folio().

Link: https://lkml.kernel.org/r/20230116192827.2146732-3-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
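For context, the helper removed here was a one-line shim that converted a
page to its folio; callers that already hold the folio now call
mlock_vma_folio() directly.  A minimal before/after sketch of the caller-side
change, assembled from the hunks below (page, vma, compound, and folio are as
in the rmap callers):

    /* Before this patch: callers went through the page-based shim ... */
    mlock_vma_page(page, vma, compound);
    /* ... whose entire body was a folio conversion plus a forward: */
    mlock_vma_folio(page_folio(page), vma, compound);

    /* After this patch: callers pass the folio they already have. */
    mlock_vma_folio(folio, vma, compound);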
Diffstat (limited to 'mm')
-rw-r--r--  mm/internal.h  10
-rw-r--r--  mm/mlock.c      4
-rw-r--r--  mm/rmap.c       4
3 files changed, 5 insertions, 13 deletions
diff --git a/mm/internal.h b/mm/internal.h
index 74bc1fe45711..0b74105ea363 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -518,7 +518,7 @@ extern long faultin_vma_page_range(struct vm_area_struct *vma,
extern int mlock_future_check(struct mm_struct *mm, unsigned long flags,
unsigned long len);
/*
- * mlock_vma_page() and munlock_vma_page():
+ * mlock_vma_folio() and munlock_vma_folio():
* should be called with vma's mmap_lock held for read or write,
* under page table lock for the pte/pmd being added or removed.
*
@@ -547,12 +547,6 @@ static inline void mlock_vma_folio(struct folio *folio,
mlock_folio(folio);
}
-static inline void mlock_vma_page(struct page *page,
- struct vm_area_struct *vma, bool compound)
-{
- mlock_vma_folio(page_folio(page), vma, compound);
-}
-
void munlock_folio(struct folio *folio);
static inline void munlock_vma_folio(struct folio *folio,
@@ -656,8 +650,6 @@ static inline struct file *maybe_unlock_mmap_for_io(struct vm_fault *vmf,
}
#else /* !CONFIG_MMU */
static inline void unmap_mapping_folio(struct folio *folio) { }
-static inline void mlock_vma_page(struct page *page,
- struct vm_area_struct *vma, bool compound) { }
static inline void munlock_vma_page(struct page *page,
struct vm_area_struct *vma, bool compound) { }
static inline void mlock_new_folio(struct folio *folio) { }
diff --git a/mm/mlock.c b/mm/mlock.c
index 9e9c8be58277..b680f11879c3 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -370,9 +370,9 @@ static void mlock_vma_pages_range(struct vm_area_struct *vma,
/*
* There is a slight chance that concurrent page migration,
* or page reclaim finding a page of this now-VM_LOCKED vma,
- * will call mlock_vma_page() and raise page's mlock_count:
+ * will call mlock_vma_folio() and raise page's mlock_count:
* double counting, leaving the page unevictable indefinitely.
- * Communicate this danger to mlock_vma_page() with VM_IO,
+ * Communicate this danger to mlock_vma_folio() with VM_IO,
* which is a VM_SPECIAL flag not allowed on VM_LOCKED vmas.
* mmap_lock is held in write mode here, so this weird
* combination should not be visible to other mmap_lock users;
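The comment above depends on mlock_vma_folio() refusing to raise the
mlock_count on VM_SPECIAL vmas.  A sketch of that guard, reconstructed from
the comment's description rather than quoted verbatim from mm/internal.h:

    /*
     * Sketch (not verbatim kernel source): only count the folio when
     * the vma is VM_LOCKED and carries no VM_SPECIAL flag.  VM_IO is a
     * VM_SPECIAL flag, so the temporary VM_LOCKED|VM_IO combination set
     * by mlock_vma_pages_range() fails this check, preventing the
     * double counting described above.
     */
    if (unlikely((vma->vm_flags & (VM_LOCKED | VM_SPECIAL)) == VM_LOCKED))
            mlock_folio(folio);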
diff --git a/mm/rmap.c b/mm/rmap.c
index 0d07c500fc86..33e15181ae73 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1260,7 +1260,7 @@ void page_add_anon_rmap(struct page *page, struct vm_area_struct *vma,
__page_check_anon_rmap(page, vma, address);
}
- mlock_vma_page(page, vma, compound);
+ mlock_vma_folio(folio, vma, compound);
}
/**
@@ -1351,7 +1351,7 @@ void page_add_file_rmap(struct page *page, struct vm_area_struct *vma,
if (nr)
__lruvec_stat_mod_folio(folio, NR_FILE_MAPPED, nr);
- mlock_vma_page(page, vma, compound);
+ mlock_vma_folio(folio, vma, compound);
}
/**