author    David Hildenbrand <david@redhat.com>        2023-12-20 23:44:36 +0100
committer Andrew Morton <akpm@linux-foundation.org>   2023-12-29 11:58:50 -0800
commit    be6e57cfabe99a5d3b3869103c4ea0ed4a9692d4 (patch)
tree      007e483e931faab70af996be9db8b47c58163b94
parent    7123e19c3c9d1539c899ac8d919498e3393bb288 (diff)
mm/rmap: remove page_add_file_rmap()
All users are gone, let's remove it.

Link: https://lkml.kernel.org/r/20231220224504.646757-13-david@redhat.com
Signed-off-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Yin Fengwei <fengwei.yin@intel.com>
Reviewed-by: Ryan Roberts <ryan.roberts@arm.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Muchun Song <songmuchun@bytedance.com>
Cc: Peter Xu <peterx@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
-rw-r--r--  include/linux/rmap.h    2
-rw-r--r--  mm/rmap.c              21
2 files changed, 0 insertions, 23 deletions
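For context (not part of the commit itself): with the wrapper gone, file-backed pages get their rmap added through the folio helpers directly. Below is a minimal, hypothetical sketch of what a conversion of a former caller looks like, mirroring the dispatch logic of the wrapper removed in the diff; the function name example_add_file_rmap() is illustrative only, not a real kernel symbol.

/*
 * Illustrative sketch only (not from this patch): how a hypothetical
 * leftover caller of the removed page_add_file_rmap() maps onto the
 * folio-based rmap API, performing the same dispatch the removed
 * wrapper did. The caller is still expected to hold the PTE lock,
 * as the removed kernel-doc noted.
 */
static void example_add_file_rmap(struct page *page,
		struct vm_area_struct *vma, bool compound)
{
	struct folio *folio = page_folio(page);

	/* Previously: page_add_file_rmap(page, vma, compound); */
	if (!compound)
		folio_add_file_rmap_pte(folio, page, vma);	/* one PTE-mapped page */
	else
		folio_add_file_rmap_pmd(folio, page, vma);	/* PMD-mapped THP */
}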
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index 3d86a76b2836..6a4db6933e7d 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -237,8 +237,6 @@ void page_add_anon_rmap(struct page *, struct vm_area_struct *,
		unsigned long address, rmap_t flags);
void folio_add_new_anon_rmap(struct folio *, struct vm_area_struct *,
		unsigned long address);
-void page_add_file_rmap(struct page *, struct vm_area_struct *,
-		bool compound);
void folio_add_file_rmap_ptes(struct folio *, struct page *, int nr_pages,
		struct vm_area_struct *);
#define folio_add_file_rmap_pte(folio, page, vma) \
diff --git a/mm/rmap.c b/mm/rmap.c
index cc1fc2d570f0..5ab5ef10fbf5 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1468,27 +1468,6 @@ void folio_add_file_rmap_pmd(struct folio *folio, struct page *page,
}
/**
- * page_add_file_rmap - add pte mapping to a file page
- * @page: the page to add the mapping to
- * @vma: the vm area in which the mapping is added
- * @compound: charge the page as compound or small page
- *
- * The caller needs to hold the pte lock.
- */
-void page_add_file_rmap(struct page *page, struct vm_area_struct *vma,
-		bool compound)
-{
-	struct folio *folio = page_folio(page);
-
-	VM_WARN_ON_ONCE_PAGE(compound && !PageTransHuge(page), page);
-
-	if (likely(!compound))
-		folio_add_file_rmap_pte(folio, page, vma);
-	else
-		folio_add_file_rmap_pmd(folio, page, vma);
-}
-
-/**
* page_remove_rmap - take down pte mapping from a page
* @page: page to remove mapping from
* @vma: the vm area from which the mapping is removed