author     Linus Torvalds <torvalds@linux-foundation.org>   2022-04-08 06:52:50 -1000
committer  Linus Torvalds <torvalds@linux-foundation.org>   2022-04-08 06:52:50 -1000
commit     d66b6985b164b368657333c4a700bf4108893612
tree       b3d9396b70cdc13321d89e7c5e7871f75aea4334 /mm/mempolicy.c
parent     d00c50b35101b862c3db270ffeba53a63a1063d9
parent     98ea02597b9967c0817d29fee2f96d21b9e59ca5
Merge tag 'folio-5.18e' of git://git.infradead.org/users/willy/pagecache
Pull folio fixes from Matthew Wilcox:
"Fewer bug reports than I was expecting from enabling large folios.
One that doesn't show up on x86 but does on arm64, one that shows up
with hugetlbfs memory-failure testing, and one that shows up with page
migration, which it turns out I wasn't testing because my last NUMA
machine died. I need to set up a qemu fake NUMA machine so I don't
skip testing that in future.
Summary:
- Remove the migration code's assumption that large pages are
PMD-sized
- Don't call pmd_page() on a non-leaf PMD
- Fix handling of hugetlbfs pages in page_vma_mapped_walk"
* tag 'folio-5.18e' of git://git.infradead.org/users/willy/pagecache:
mm/rmap: Fix handling of hugetlbfs pages in page_vma_mapped_walk
mm/mempolicy: Use vma_alloc_folio() in new_page()
mm: Add vma_alloc_folio()
mm/migrate: Use a folio in migrate_misplaced_transhuge_page()
mm/migrate: Use a folio in alloc_migration_target()
mm/huge_memory: Avoid calling pmd_page() on a non-leaf PMD
Diffstat (limited to 'mm/mempolicy.c')
-rw-r--r--  mm/mempolicy.c | 38
1 file changed, 24 insertions(+), 14 deletions(-)
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index a2516d31db6c..649bd3be8682 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -1191,8 +1191,10 @@ int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
  */
 static struct page *new_page(struct page *page, unsigned long start)
 {
+	struct folio *dst, *src = page_folio(page);
 	struct vm_area_struct *vma;
 	unsigned long address;
+	gfp_t gfp = GFP_HIGHUSER_MOVABLE | __GFP_RETRY_MAYFAIL;
 
 	vma = find_vma(current->mm, start);
 	while (vma) {
@@ -1202,24 +1204,19 @@ static struct page *new_page(struct page *page, unsigned long start)
 		vma = vma->vm_next;
 	}
 
-	if (PageHuge(page)) {
-		return alloc_huge_page_vma(page_hstate(compound_head(page)),
+	if (folio_test_hugetlb(src))
+		return alloc_huge_page_vma(page_hstate(&src->page),
 				vma, address);
-	} else if (PageTransHuge(page)) {
-		struct page *thp;
-
-		thp = alloc_hugepage_vma(GFP_TRANSHUGE, vma, address,
-					 HPAGE_PMD_ORDER);
-		if (!thp)
-			return NULL;
-		prep_transhuge_page(thp);
-		return thp;
-	}
+
+	if (folio_test_large(src))
+		gfp = GFP_TRANSHUGE;
+
 	/*
-	 * if !vma, alloc_page_vma() will use task or system default policy
+	 * if !vma, vma_alloc_folio() will use task or system default policy
 	 */
-	return alloc_page_vma(GFP_HIGHUSER_MOVABLE | __GFP_RETRY_MAYFAIL,
-			vma, address);
+	dst = vma_alloc_folio(gfp, folio_order(src), vma, address,
+			folio_test_large(src));
+	return &dst->page;
 }
 #else
 
@@ -2227,6 +2224,19 @@ out:
 }
 EXPORT_SYMBOL(alloc_pages_vma);
 
+struct folio *vma_alloc_folio(gfp_t gfp, int order, struct vm_area_struct *vma,
+		unsigned long addr, bool hugepage)
+{
+	struct folio *folio;
+
+	folio = (struct folio *)alloc_pages_vma(gfp, order, vma, addr,
+			hugepage);
+	if (folio && order > 1)
+		prep_transhuge_page(&folio->page);
+
+	return folio;
+}
+
 /**
  * alloc_pages - Allocate pages.
  * @gfp: GFP flags.
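For context on how the new helper is meant to be called, here is a condensed
sketch of the allocation pattern new_page() follows after this patch: pick a
GFP mask based on the source folio's size, then let vma_alloc_folio() allocate
a destination folio of the same order under the VMA's NUMA policy. This is a
minimal sketch against a v5.18-era tree, not buildable on its own; the function
name alloc_dst_folio() is hypothetical, and the hugetlbfs case that new_page()
sends to alloc_huge_page_vma() is omitted.

#include <linux/gfp.h>
#include <linux/mm.h>

/*
 * Illustrative helper (hypothetical, not a kernel symbol): allocate a
 * destination folio matching @src's order, as new_page() does above.
 */
static struct folio *alloc_dst_folio(struct folio *src,
				     struct vm_area_struct *vma,
				     unsigned long address)
{
	gfp_t gfp = GFP_HIGHUSER_MOVABLE | __GFP_RETRY_MAYFAIL;

	/* Multi-page folios are allocated with THP semantics. */
	if (folio_test_large(src))
		gfp = GFP_TRANSHUGE;

	/*
	 * vma_alloc_folio() honours @vma's memory policy (or the task or
	 * system default when @vma is NULL); for order > 1 it also calls
	 * prep_transhuge_page() on the new folio.
	 */
	return vma_alloc_folio(gfp, folio_order(src), vma, address,
			       folio_test_large(src));
}

The final hugepage argument (folio_test_large(src) here) is passed through to
alloc_pages_vma(), which uses it to select its transparent-huge-page policy
handling rather than the base-page path.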