author	Matthew Wilcox (Oracle) <willy@infradead.org>	2023-01-11 14:28:48 +0000
committer	Andrew Morton <akpm@linux-foundation.org>	2023-02-02 22:32:55 -0800
commit	eec20426d48bd7b63c69969a793943ed1a99b731 (patch)
tree	a7446adbfcf2816231534c28869a34d981d28e67 /mm
parent	94688e8eb453e616098cb930e5f6fed4a6ea2dfa (diff)
mm: convert head_subpages_mapcount() into folio_nr_pages_mapped()
Calling this 'mapcount' is confusing since mapcount is usually the number
of times something is mapped; instead this is the number of mapped pages.
It's also better to enforce that this is a folio rather than a head page.

Move folio_nr_pages_mapped() into mm/internal.h since this is not
something we want device drivers or filesystems poking at.  Get rid of
folio_subpages_mapcount_ptr() and use folio->_nr_pages_mapped directly.

Link: https://lkml.kernel.org/r/20230111142915.1001531-3-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
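To make the renamed quantity concrete: folio_nr_pages_mapped() reports how
many of a folio's pages are mapped by PTE, whereas a mapcount reports how
many times something is mapped. A short sketch of the distinction (the
accessor is the one added below in mm/internal.h; the surrounding usage is
illustrative only):

	/*
	 * Illustrative only: the new accessor takes a folio, not a head
	 * page, and returns a number of pages, not a number of mappings.
	 */
	struct folio *folio = page_folio(page);

	/* How many of the folio's pages are mapped by PTE somewhere. */
	int nr_pages_mapped = folio_nr_pages_mapped(folio);

	/* A different question: how many times is the folio mapped in
	 * its entirety by PMD?  That is head_compound_mapcount(). */
	int entire_mapcount = head_compound_mapcount(&folio->page);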
Diffstat (limited to 'mm')
-rw-r--r--	mm/debug.c	4
-rw-r--r--	mm/hugetlb.c	4
-rw-r--r--	mm/internal.h	18
-rw-r--r--	mm/rmap.c	9
4 files changed, 27 insertions(+), 8 deletions(-)
diff --git a/mm/debug.c b/mm/debug.c
index 893c9dbf76ca..8e58e8dab0b2 100644
--- a/mm/debug.c
+++ b/mm/debug.c
@@ -94,10 +94,10 @@ static void __dump_page(struct page *page)
page, page_ref_count(head), mapcount, mapping,
page_to_pgoff(page), page_to_pfn(page));
if (compound) {
- pr_warn("head:%p order:%u compound_mapcount:%d subpages_mapcount:%d pincount:%d\n",
+ pr_warn("head:%p order:%u compound_mapcount:%d nr_pages_mapped:%d pincount:%d\n",
head, compound_order(head),
head_compound_mapcount(head),
- head_subpages_mapcount(head),
+ folio_nr_pages_mapped(folio),
atomic_read(&folio->_pincount));
}
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 15b2707c1600..c9702224931c 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1475,7 +1475,7 @@ static void __destroy_compound_gigantic_folio(struct folio *folio,
struct page *p;
atomic_set(folio_mapcount_ptr(folio), 0);
- atomic_set(folio_subpages_mapcount_ptr(folio), 0);
+ atomic_set(&folio->_nr_pages_mapped, 0);
atomic_set(&folio->_pincount, 0);
for (i = 1; i < nr_pages; i++) {
@@ -1997,7 +1997,7 @@ static bool __prep_compound_gigantic_folio(struct folio *folio,
set_compound_head(p, &folio->page);
}
atomic_set(folio_mapcount_ptr(folio), -1);
- atomic_set(folio_subpages_mapcount_ptr(folio), 0);
+ atomic_set(&folio->_nr_pages_mapped, 0);
atomic_set(&folio->_pincount, 0);
return true;
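The differing initial values in these two hunks are deliberate: compound
and per-page mapcounts are stored biased by -1 (a stored -1 means "mapped
zero times", and readers add 1 back), so __prep_compound_gigantic_folio()
seeds the compound mapcount with -1, while _nr_pages_mapped is an unbiased
count that starts at 0. A minimal model of the bias (the function name is
hypothetical):

	/* Hypothetical model: mapcount fields store one less than the
	 * logical count, so a freshly prepared folio stores -1. */
	static inline int model_entire_mapcount(const atomic_t *stored)
	{
		return atomic_read(stored) + 1;	/* -1 => mapped 0 times */
	}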
diff --git a/mm/internal.h b/mm/internal.h
index 1d6f4e168510..583e15357e09 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -52,6 +52,24 @@ struct folio_batch;
void page_writeback_init(void);
+/*
+ * If a 16GB hugetlb folio were mapped by PTEs of all of its 4kB pages,
+ * its nr_pages_mapped would be 0x400000: choose the COMPOUND_MAPPED bit
+ * above that range, instead of 2*(PMD_SIZE/PAGE_SIZE). Hugetlb currently
+ * leaves nr_pages_mapped at 0, but avoid surprise if it participates later.
+ */
+#define COMPOUND_MAPPED 0x800000
+#define FOLIO_PAGES_MAPPED (COMPOUND_MAPPED - 1)
+
+/*
+ * How many individual pages have an elevated _mapcount. Excludes
+ * the folio's entire_mapcount.
+ */
+static inline int folio_nr_pages_mapped(struct folio *folio)
+{
+ return atomic_read(&folio->_nr_pages_mapped) & FOLIO_PAGES_MAPPED;
+}
+
static inline void *folio_raw_mapping(struct folio *folio)
{
unsigned long mapping = (unsigned long)folio->mapping;
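The arithmetic in the new comment holds: a 16GB hugetlb folio has
16GB / 4kB = 0x400000 pages, so even the fully PTE-mapped worst case stays
below the COMPOUND_MAPPED bit at 0x800000, and masking with
FOLIO_PAGES_MAPPED always recovers the page count intact. A standalone
userspace sketch of the encoding (only the two macros come from the patch;
everything else is illustrative):

	#include <assert.h>
	#include <stdio.h>

	#define COMPOUND_MAPPED    0x800000
	#define FOLIO_PAGES_MAPPED (COMPOUND_MAPPED - 1)

	int main(void)
	{
		unsigned int val = 0;

		/* A PMD mapping of the whole folio sets the high bit... */
		val += COMPOUND_MAPPED;
		/* ...and PTE mappings of three pages use the low bits. */
		val += 3;

		/* Masking recovers the PTE-mapped page count alone. */
		assert((val & FOLIO_PAGES_MAPPED) == 3);

		/* Worst case: every 4kB page of a 16GB folio PTE-mapped. */
		assert((16ULL << 30) / (4 << 10) == 0x400000);
		assert(0x400000 <= FOLIO_PAGES_MAPPED);

		printf("pages mapped by PTE: %u\n", val & FOLIO_PAGES_MAPPED);
		return 0;
	}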
diff --git a/mm/rmap.c b/mm/rmap.c
index 6ccd42bbae93..b573472b4ac3 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1080,12 +1080,13 @@ int pfn_mkclean_range(unsigned long pfn, unsigned long nr_pages, pgoff_t pgoff,
int total_compound_mapcount(struct page *head)
{
{
+ struct folio *folio = (struct folio *)head;
int mapcount = head_compound_mapcount(head);
int nr_subpages;
int i;
/* In the common case, avoid the loop when no subpages mapped by PTE */
- if (head_subpages_mapcount(head) == 0)
+ if (folio_nr_pages_mapped(folio) == 0)
return mapcount;
/*
* Add all the PTE mappings of those subpages mapped by PTE.
@@ -1233,7 +1234,7 @@ void page_add_anon_rmap(struct page *page,
nr = atomic_add_return_relaxed(COMPOUND_MAPPED, mapped);
if (likely(nr < COMPOUND_MAPPED + COMPOUND_MAPPED)) {
nr_pmdmapped = thp_nr_pages(page);
- nr = nr_pmdmapped - (nr & SUBPAGES_MAPPED);
+ nr = nr_pmdmapped - (nr & FOLIO_PAGES_MAPPED);
/* Raced ahead of a remove and another add? */
if (unlikely(nr < 0))
nr = 0;
@@ -1337,7 +1338,7 @@ void page_add_file_rmap(struct page *page,
nr = atomic_add_return_relaxed(COMPOUND_MAPPED, mapped);
if (likely(nr < COMPOUND_MAPPED + COMPOUND_MAPPED)) {
nr_pmdmapped = thp_nr_pages(page);
- nr = nr_pmdmapped - (nr & SUBPAGES_MAPPED);
+ nr = nr_pmdmapped - (nr & FOLIO_PAGES_MAPPED);
/* Raced ahead of a remove and another add? */
if (unlikely(nr < 0))
nr = 0;
@@ -1399,7 +1400,7 @@ void page_remove_rmap(struct page *page,
nr = atomic_sub_return_relaxed(COMPOUND_MAPPED, mapped);
if (likely(nr < COMPOUND_MAPPED)) {
nr_pmdmapped = thp_nr_pages(page);
- nr = nr_pmdmapped - (nr & SUBPAGES_MAPPED);
+ nr = nr_pmdmapped - (nr & FOLIO_PAGES_MAPPED);
/* Raced ahead of another remove and an add? */
if (unlikely(nr < 0))
nr = 0;
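The pattern shared by the three hunks above: atomically adding (or
subtracting) COMPOUND_MAPPED both records the PMD mapping and returns a
value whose low bits, masked with FOLIO_PAGES_MAPPED, say how many pages
were already PTE-mapped, letting the caller compute how many pages became
newly mapped without a second read. A userspace model of the add path
(THP_NR_PAGES and add_pmd_mapping() are hypothetical; the constants and
control flow mirror the patch):

	#include <stdatomic.h>
	#include <stdio.h>

	#define COMPOUND_MAPPED    0x800000
	#define FOLIO_PAGES_MAPPED (COMPOUND_MAPPED - 1)
	#define THP_NR_PAGES       512	/* hypothetical: 2MB THP of 4kB pages */

	/* Stands in for folio->_nr_pages_mapped. */
	static atomic_int nr_pages_mapped;

	/* How many pages become newly mapped when a PMD mapping is added. */
	static int add_pmd_mapping(void)
	{
		/* The kernel uses atomic_add_return_relaxed(); modelled as
		 * fetch-add plus the addend to get the post-add value. */
		int nr = atomic_fetch_add_explicit(&nr_pages_mapped,
				COMPOUND_MAPPED, memory_order_relaxed)
				+ COMPOUND_MAPPED;

		if (nr < COMPOUND_MAPPED + COMPOUND_MAPPED) {
			/* First PMD mapping: don't double-count pages that
			 * are already mapped by PTE. */
			nr = THP_NR_PAGES - (nr & FOLIO_PAGES_MAPPED);
			return nr < 0 ? 0 : nr;	/* raced with remove + add */
		}
		return 0;	/* already PMD-mapped elsewhere */
	}

	int main(void)
	{
		atomic_store(&nr_pages_mapped, 3);	/* 3 pages PTE-mapped */
		printf("newly mapped: %d\n", add_pmd_mapping());	/* 509 */
		return 0;
	}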