author    Matthew Wilcox (Oracle) <willy@infradead.org>	2023-01-11 14:28:54 +0000
committer Andrew Morton <akpm@linux-foundation.org>	2023-02-02 22:32:56 -0800
commit    4d510f3da4c216d4c2695395f67aec38e2aa6cc7 (patch)
tree      e726b8fb8ac10a96c739baa733f526fd5c083e5b /mm/rmap.c
parent    eb01a2ad7e9cba1b9dd131edc5a26ffbda90a5ed (diff)
mm: add folio_add_new_anon_rmap()
In contrast to other rmap functions, page_add_new_anon_rmap() is always
called with a freshly allocated page.  That means it can't be called with
a tail page.  Turn page_add_new_anon_rmap() into folio_add_new_anon_rmap()
and add a page_add_new_anon_rmap() wrapper.  Callers can be converted
individually.

[akpm@linux-foundation.org: fix NOMMU build.  page_add_new_anon_rmap() requires CONFIG_MMU]
[willy@infradead.org: folio-compat.c needs rmap.h]
Link: https://lkml.kernel.org/r/20230111142915.1001531-9-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
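The compat wrapper mentioned in the message lives in mm/folio-compat.c, so it does not appear in the mm/rmap.c diff below.  A minimal sketch of such a wrapper, assuming (as the commit message states) that the page is freshly allocated and therefore never a tail page:

void page_add_new_anon_rmap(struct page *page, struct vm_area_struct *vma,
		unsigned long address)
{
	/* A freshly allocated page can never be a tail page. */
	VM_BUG_ON_PAGE(PageTail(page), page);

	folio_add_new_anon_rmap(page_folio(page), vma, address);
}

Keeping the wrapper separate lets callers be converted individually; and since page_add_new_anon_rmap() requires CONFIG_MMU, the wrapper is compiled out on NOMMU builds (the akpm build-fix note above).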
Diffstat (limited to 'mm/rmap.c')
-rw-r--r--	mm/rmap.c	37
1 file changed, 18 insertions(+), 19 deletions(-)
diff --git a/mm/rmap.c b/mm/rmap.c
index 430c066c2295..513e23e5a158 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1264,41 +1264,40 @@ void page_add_anon_rmap(struct page *page, struct vm_area_struct *vma,
}
/**
- * page_add_new_anon_rmap - add mapping to a new anonymous page
- * @page: the page to add the mapping to
+ * folio_add_new_anon_rmap - Add mapping to a new anonymous folio.
+ * @folio: The folio to add the mapping to.
* @vma: the vm area in which the mapping is added
* @address: the user virtual address mapped
*
- * If it's a compound page, it is accounted as a compound page. As the page
- * is new, it's assume to get mapped exclusively by a single process.
- *
- * Same as page_add_anon_rmap but must only be called on *new* pages.
+ * Like page_add_anon_rmap() but must only be called on *new* folios.
* This means the inc-and-test can be bypassed.
- * Page does not have to be locked.
+ * The folio does not have to be locked.
+ *
+ * If the folio is large, it is accounted as a THP. As the folio
+ * is new, it's assumed to be mapped exclusively by a single process.
*/
-void page_add_new_anon_rmap(struct page *page,
- struct vm_area_struct *vma, unsigned long address)
+void folio_add_new_anon_rmap(struct folio *folio, struct vm_area_struct *vma,
+ unsigned long address)
{
int nr;
VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma);
- __SetPageSwapBacked(page);
+ __folio_set_swapbacked(folio);
- if (likely(!PageCompound(page))) {
+ if (likely(!folio_test_pmd_mappable(folio))) {
/* increment count (starts at -1) */
- atomic_set(&page->_mapcount, 0);
+ atomic_set(&folio->_mapcount, 0);
nr = 1;
} else {
- VM_BUG_ON_PAGE(!PageTransHuge(page), page);
/* increment count (starts at -1) */
- atomic_set(compound_mapcount_ptr(page), 0);
- atomic_set(subpages_mapcount_ptr(page), COMPOUND_MAPPED);
- nr = thp_nr_pages(page);
- __mod_lruvec_page_state(page, NR_ANON_THPS, nr);
+ atomic_set(&folio->_entire_mapcount, 0);
+ atomic_set(&folio->_nr_pages_mapped, COMPOUND_MAPPED);
+ nr = folio_nr_pages(folio);
+ __lruvec_stat_mod_folio(folio, NR_ANON_THPS, nr);
}
- __mod_lruvec_page_state(page, NR_ANON_MAPPED, nr);
- __page_set_anon_rmap(page, vma, address, 1);
+ __lruvec_stat_mod_folio(folio, NR_ANON_MAPPED, nr);
+ __page_set_anon_rmap(&folio->page, vma, address, 1);
}
/**
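For context on how converted callers end up using the new API, here is a hypothetical sketch; the helper name map_new_anon_folio() is illustrative only, and the actual caller conversions happen in later patches of this series, not in this commit:

static void map_new_anon_folio(struct folio *folio,
		struct vm_area_struct *vma, unsigned long addr)
{
	/*
	 * The folio is freshly allocated, so the "new" rmap call applies:
	 * no other process can map it yet and the inc-and-test is skipped.
	 */
	folio_add_new_anon_rmap(folio, vma, addr);
	folio_add_lru_vma(folio, vma);
}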