From a8e61d584eda0d5532b0bbfe3c2427d2688d3c83 Mon Sep 17 00:00:00 2001
From: David Hildenbrand
Date: Wed, 20 Dec 2023 23:44:49 +0100
Subject: mm/huge_memory: page_remove_rmap() -> folio_remove_rmap_pmd()

Let's convert zap_huge_pmd() and set_pmd_migration_entry(). While at it,
perform some more folio conversion.

Link: https://lkml.kernel.org/r/20231220224504.646757-26-david@redhat.com
Signed-off-by: David Hildenbrand
Cc: Hugh Dickins
Cc: Matthew Wilcox (Oracle)
Cc: Muchun Song
Cc: Muchun Song
Cc: Peter Xu
Cc: Ryan Roberts
Cc: Yin Fengwei
Signed-off-by: Andrew Morton
---
 mm/huge_memory.c | 26 ++++++++++++++------------
 1 file changed, 14 insertions(+), 12 deletions(-)

diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 3b84a0a88fa1..ddc03421df0f 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1898,7 +1898,7 @@ int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
 
 		if (pmd_present(orig_pmd)) {
 			page = pmd_page(orig_pmd);
-			page_remove_rmap(page, vma, true);
+			folio_remove_rmap_pmd(page_folio(page), page, vma);
 			VM_BUG_ON_PAGE(page_mapcount(page) < 0, page);
 			VM_BUG_ON_PAGE(!PageHead(page), page);
 		} else if (thp_migration_supported()) {
@@ -2433,12 +2433,13 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
 			page = pfn_swap_entry_to_page(entry);
 		} else {
 			page = pmd_page(old_pmd);
-			if (!PageDirty(page) && pmd_dirty(old_pmd))
-				set_page_dirty(page);
-			if (!PageReferenced(page) && pmd_young(old_pmd))
-				SetPageReferenced(page);
-			page_remove_rmap(page, vma, true);
-			put_page(page);
+			folio = page_folio(page);
+			if (!folio_test_dirty(folio) && pmd_dirty(old_pmd))
+				folio_set_dirty(folio);
+			if (!folio_test_referenced(folio) && pmd_young(old_pmd))
+				folio_set_referenced(folio);
+			folio_remove_rmap_pmd(folio, page, vma);
+			folio_put(folio);
 		}
 		add_mm_counter(mm, mm_counter_file(page), -HPAGE_PMD_NR);
 		return;
@@ -2593,7 +2594,7 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
 	pte_unmap(pte - 1);
 
 	if (!pmd_migration)
-		page_remove_rmap(page, vma, true);
+		folio_remove_rmap_pmd(folio, page, vma);
 	if (freeze)
 		put_page(page);
 
@@ -3536,6 +3537,7 @@ late_initcall(split_huge_pages_debugfs);
 int set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
 		struct page *page)
 {
+	struct folio *folio = page_folio(page);
 	struct vm_area_struct *vma = pvmw->vma;
 	struct mm_struct *mm = vma->vm_mm;
 	unsigned long address = pvmw->address;
@@ -3551,14 +3553,14 @@ int set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
 	pmdval = pmdp_invalidate(vma, address, pvmw->pmd);
 
 	/* See page_try_share_anon_rmap(): invalidate PMD first. */
-	anon_exclusive = PageAnon(page) && PageAnonExclusive(page);
+	anon_exclusive = folio_test_anon(folio) && PageAnonExclusive(page);
 	if (anon_exclusive && page_try_share_anon_rmap(page)) {
 		set_pmd_at(mm, address, pvmw->pmd, pmdval);
 		return -EBUSY;
 	}
 
 	if (pmd_dirty(pmdval))
-		set_page_dirty(page);
+		folio_set_dirty(folio);
 	if (pmd_write(pmdval))
 		entry = make_writable_migration_entry(page_to_pfn(page));
 	else if (anon_exclusive)
@@ -3575,8 +3577,8 @@ int set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
 	if (pmd_uffd_wp(pmdval))
 		pmdswp = pmd_swp_mkuffd_wp(pmdswp);
 	set_pmd_at(mm, address, pvmw->pmd, pmdswp);
-	page_remove_rmap(page, vma, true);
-	put_page(page);
+	folio_remove_rmap_pmd(folio, page, vma);
+	folio_put(folio);
 	trace_set_migration_pmd(address, pmd_val(pmdswp));
 
 	return 0;
--
cgit
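
Note (illustrative sketch, not part of the patch): the before/after calling
convention this diff applies, assuming a PMD-mapped THP 'page' mapped into
'vma'; only functions that appear in the diff above are used.

	struct folio *folio = page_folio(page);

	/* Before: page-based API; the 'true' argument meant a compound (PMD) mapping. */
	page_remove_rmap(page, vma, true);
	put_page(page);

	/* After: folio-based API; the PMD granularity is explicit in the function name. */
	folio_remove_rmap_pmd(folio, page, vma);
	folio_put(folio);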