author		Linus Torvalds <torvalds@linux-foundation.org>	2021-07-15 12:17:05 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2021-07-15 12:17:05 -0700
commit		dd9c7df94c1b23feacd54112f33ad95d93f64533 (patch)
tree		fbfd54b8f19a8c2bb56696d05e221b7351081eec /mm/page_alloc.c
parent		a1c9ca5f65c9acfd7c02474b9d5cacbd7ea288df (diff)
parent		d08af0a59684e18a51aa4bfd24c658994ea3fc5b (diff)
Merge branch 'akpm' (patches from Andrew)
Merge misc fixes from Andrew Morton:
 "13 patches.

  Subsystems affected by this patch series: mm (kasan, pagealloc, rmap,
  hmm, and hugetlb), and hfs"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
  mm/hugetlb: fix refs calculation from unaligned @vaddr
  hfs: add lock nesting notation to hfs_find_init
  hfs: fix high memory mapping in hfs_bnode_read
  hfs: add missing clean-up in hfs_fill_super
  lib/test_hmm: remove set but unused page variable
  mm: fix the try_to_unmap prototype for !CONFIG_MMU
  mm/page_alloc: further fix __alloc_pages_bulk() return value
  mm/page_alloc: correct return value when failing at preparing
  mm/page_alloc: avoid page allocator recursion with pagesets.lock held
  Revert "mm/page_alloc: make should_fail_alloc_page() static"
  kasan: fix build by including kernel.h
  kasan: add memzero init for unaligned size at DEBUG
  mm: move helper to check slub_debug_enabled
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--	mm/page_alloc.c	28
1 file changed, 21 insertions(+), 7 deletions(-)
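
The three page_alloc fixes in this series all concern what __alloc_pages_bulk() reports back to its caller: it must return the number of populated array entries, including entries that were already populated on entry, even when it bails out early. Below is a minimal, hypothetical caller sketch showing why that contract matters; refill_page_array() and its error handling are illustrative only (not from this commit), while alloc_pages_bulk_array() is the existing array-based wrapper around __alloc_pages_bulk():

	/* Hypothetical caller: top up an array of pages. It relies on the
	 * bulk allocator returning how many leading entries are populated
	 * (pre-existing ones included) rather than 0 on an early exit,
	 * which is exactly what the return-value fixes below restore. */
	static int refill_page_array(struct page **pages, unsigned long nr)
	{
		unsigned long filled;

		filled = alloc_pages_bulk_array(GFP_KERNEL, nr, pages);

		/* Fall back to single-page allocations for whatever is missing. */
		for (; filled < nr; filled++) {
			pages[filled] = alloc_page(GFP_KERNEL);
			if (!pages[filled])
				return -ENOMEM;
		}
		return 0;
	}

If the bulk allocator returned 0 instead of the populated count on an early exit, a caller like this would needlessly re-allocate (or leak) entries it already holds.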
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 3b97e17806be..3e97e68aef7a 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -3820,7 +3820,7 @@ static inline bool __should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
#endif /* CONFIG_FAIL_PAGE_ALLOC */
-static noinline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
+noinline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
{
return __should_fail_alloc_page(gfp_mask, order);
}
@@ -5221,9 +5221,6 @@ unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid,
unsigned int alloc_flags = ALLOC_WMARK_LOW;
int nr_populated = 0, nr_account = 0;
- if (unlikely(nr_pages <= 0))
- return 0;
-
/*
* Skip populated array elements to determine if any pages need
* to be allocated before disabling IRQs.
@@ -5231,19 +5228,35 @@ unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid,
while (page_array && nr_populated < nr_pages && page_array[nr_populated])
nr_populated++;
+ /* No pages requested? */
+ if (unlikely(nr_pages <= 0))
+ goto out;
+
/* Already populated array? */
if (unlikely(page_array && nr_pages - nr_populated == 0))
- return nr_populated;
+ goto out;
/* Use the single page allocator for one page. */
if (nr_pages - nr_populated == 1)
goto failed;
+#ifdef CONFIG_PAGE_OWNER
+ /*
+ * PAGE_OWNER may recurse into the allocator to allocate space to
+ * save the stack with pagesets.lock held. Releasing/reacquiring
+ * removes much of the performance benefit of bulk allocation so
+ * force the caller to allocate one page at a time as it'll have
+ * similar performance to added complexity to the bulk allocator.
+ */
+ if (static_branch_unlikely(&page_owner_inited))
+ goto failed;
+#endif
+
/* May set ALLOC_NOFRAGMENT, fragmentation will return 1 page. */
gfp &= gfp_allowed_mask;
alloc_gfp = gfp;
if (!prepare_alloc_pages(gfp, 0, preferred_nid, nodemask, &ac, &alloc_gfp, &alloc_flags))
- return 0;
+ goto out;
gfp = alloc_gfp;
/* Find an allowed local zone that meets the low watermark. */
@@ -5311,6 +5324,7 @@ unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid,
__count_zid_vm_events(PGALLOC, zone_idx(zone), nr_account);
zone_statistics(ac.preferred_zoneref->zone, zone, nr_account);
+out:
return nr_populated;
failed_irq:
@@ -5326,7 +5340,7 @@ failed:
nr_populated++;
}
- return nr_populated;
+ goto out;
}
EXPORT_SYMBOL_GPL(__alloc_pages_bulk);
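
Background on the revert of "mm/page_alloc: make should_fail_alloc_page() static" (the first hunk above): the symbol is referenced by the error-injection machinery, so hiding it caused an unresolved-symbol failure in the BTFIDS step with CONFIG_DEBUG_INFO_BTF=y. From memory of the surrounding code in mm/page_alloc.c (a sketch, not part of this diff), the definition is followed by an ALLOW_ERROR_INJECTION() marker, which is why the function must remain a visible, non-inlined symbol:

	noinline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
	{
		return __should_fail_alloc_page(gfp_mask, order);
	}
	/* Registers the function for fault/error injection (e.g. via BPF);
	 * this only works if the symbol stays visible and is not inlined away. */
	ALLOW_ERROR_INJECTION(should_fail_alloc_page, TRUE);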