path: root/include
author	Linus Torvalds <torvalds@linux-foundation.org>	2022-04-08 06:52:50 -1000
committer	Linus Torvalds <torvalds@linux-foundation.org>	2022-04-08 06:52:50 -1000
commit	d66b6985b164b368657333c4a700bf4108893612 (patch)
tree	b3d9396b70cdc13321d89e7c5e7871f75aea4334 /include
parent	d00c50b35101b862c3db270ffeba53a63a1063d9 (diff)
parent	98ea02597b9967c0817d29fee2f96d21b9e59ca5 (diff)
Merge tag 'folio-5.18e' of git://git.infradead.org/users/willy/pagecache
Pull folio fixes from Matthew Wilcox:
 "Fewer bug reports than I was expecting from enabling large folios.
  One that doesn't show up on x86 but does on arm64, one that shows up
  with hugetlbfs memory failure testing and one that shows up with page
  migration, which it turns out I wasn't testing because my last NUMA
  machine died. Need to set up a qemu fake NUMA machine so I don't skip
  testing that in future.

  Summary:

   - Remove the migration code's assumptions about large pages being
     PMD sized

   - Don't call pmd_page() on a non-leaf PMD

   - Fix handling of hugetlbfs pages in page_vma_mapped_walk"

* tag 'folio-5.18e' of git://git.infradead.org/users/willy/pagecache:
  mm/rmap: Fix handling of hugetlbfs pages in page_vma_mapped_walk
  mm/mempolicy: Use vma_alloc_folio() in new_page()
  mm: Add vma_alloc_folio()
  mm/migrate: Use a folio in migrate_misplaced_transhuge_page()
  mm/migrate: Use a folio in alloc_migration_target()
  mm/huge_memory: Avoid calling pmd_page() on a non-leaf PMD
Diffstat (limited to 'include')
-rw-r--r--	include/linux/gfp.h	8
1 file changed, 6 insertions(+), 2 deletions(-)
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 761f8f1885c7..3e3d36fc2109 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -613,9 +613,11 @@ static inline struct page *alloc_pages_node(int nid, gfp_t gfp_mask,
 #ifdef CONFIG_NUMA
 struct page *alloc_pages(gfp_t gfp, unsigned int order);
 struct folio *folio_alloc(gfp_t gfp, unsigned order);
-extern struct page *alloc_pages_vma(gfp_t gfp_mask, int order,
+struct page *alloc_pages_vma(gfp_t gfp_mask, int order,
 			struct vm_area_struct *vma, unsigned long addr,
 			bool hugepage);
+struct folio *vma_alloc_folio(gfp_t gfp, int order, struct vm_area_struct *vma,
+		unsigned long addr, bool hugepage);
 #define alloc_hugepage_vma(gfp_mask, vma, addr, order) \
 	alloc_pages_vma(gfp_mask, order, vma, addr, true)
 #else
@@ -627,8 +629,10 @@ static inline struct folio *folio_alloc(gfp_t gfp, unsigned int order)
 {
 	return __folio_alloc_node(gfp, order, numa_node_id());
 }
-#define alloc_pages_vma(gfp_mask, order, vma, addr, false)\
+#define alloc_pages_vma(gfp_mask, order, vma, addr, hugepage) \
 	alloc_pages(gfp_mask, order)
+#define vma_alloc_folio(gfp, order, vma, addr, hugepage) \
+	folio_alloc(gfp, order)
 #define alloc_hugepage_vma(gfp_mask, vma, addr, order) \
 	alloc_pages(gfp_mask, order)
 #endif
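
For illustration only (not part of this commit): a minimal sketch of how a caller
might use the new vma_alloc_folio() declared above. It allocates a folio of the
requested order for a fault in a VMA, honouring the VMA's mempolicy on
CONFIG_NUMA kernels, and falls back to a single page if the higher-order
allocation fails. The function name alloc_dst_folio and the GFP flag choices
are assumptions made for the example; on !CONFIG_NUMA kernels the macro above
collapses the call to folio_alloc(gfp, order), so the call site is the same
either way.

#include <linux/gfp.h>
#include <linux/mm_types.h>

/*
 * Illustrative sketch -- not from this commit.  Allocate a folio of
 * the requested order for a fault in @vma at @addr, falling back to
 * order-0 if the large allocation cannot be satisfied.
 */
static struct folio *alloc_dst_folio(struct vm_area_struct *vma,
				     unsigned long addr, int order)
{
	struct folio *folio;

	/* Opportunistic large allocation: don't retry hard. */
	folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE | __GFP_NORETRY,
				order, vma, addr, false);
	if (folio)
		return folio;

	/* Order-0 fallback. */
	return vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, addr, false);
}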