Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--  mm/page_alloc.c  73
1 file changed, 61 insertions(+), 12 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index a259c22e4609..40baa2421136 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -3026,13 +3026,16 @@ static void drain_local_pages_wq(struct work_struct *work)
}
/*
- * Spill all the per-cpu pages from all CPUs back into the buddy allocator.
- *
- * When zone parameter is non-NULL, spill just the single zone's pages.
+ * The implementation of drain_all_pages(), exposing an extra parameter to
+ * drain on all cpus.
*
- * Note that this can be extremely slow as the draining happens in a workqueue.
+ * drain_all_pages() is optimized to only execute on cpus where pcplists are
+ * not empty. The check for non-emptiness can however race with a free to
+ * pcplist that has not yet increased the pcp->count from 0 to 1. Callers
+ * that need the guarantee that every CPU has drained can disable the
+ * optimizing racy check.
*/
-void drain_all_pages(struct zone *zone)
+void __drain_all_pages(struct zone *zone, bool force_all_cpus)
{
int cpu;
@@ -3071,7 +3074,13 @@ void drain_all_pages(struct zone *zone)
struct zone *z;
bool has_pcps = false;
- if (zone) {
+ if (force_all_cpus) {
+ /*
+ * The pcp.count check is racy, some callers need a
+ * guarantee that no cpu is missed.
+ */
+ has_pcps = true;
+ } else if (zone) {
pcp = per_cpu_ptr(zone->pageset, cpu);
if (pcp->pcp.count)
has_pcps = true;
@@ -3104,6 +3113,18 @@ void drain_all_pages(struct zone *zone)
mutex_unlock(&pcpu_drain_mutex);
}
+/*
+ * Spill all the per-cpu pages from all CPUs back into the buddy allocator.
+ *
+ * When zone parameter is non-NULL, spill just the single zone's pages.
+ *
+ * Note that this can be extremely slow as the draining happens in a workqueue.
+ */
+void drain_all_pages(struct zone *zone)
+{
+ __drain_all_pages(zone, false);
+}
+
#ifdef CONFIG_HIBERNATION
/*
@@ -6316,6 +6337,18 @@ static void pageset_init(struct per_cpu_pageset *p)
pcp->batch = BOOT_PAGESET_BATCH;
}
+void __zone_set_pageset_high_and_batch(struct zone *zone, unsigned long high,
+ unsigned long batch)
+{
+ struct per_cpu_pageset *p;
+ int cpu;
+
+ for_each_possible_cpu(cpu) {
+ p = per_cpu_ptr(zone->pageset, cpu);
+ pageset_update(&p->pcp, high, batch);
+ }
+}
+
/*
* Calculate and set new high and batch values for all per-cpu pagesets of a
* zone, based on the zone's size and the percpu_pagelist_fraction sysctl.
@@ -6323,8 +6356,6 @@ static void pageset_init(struct per_cpu_pageset *p)
static void zone_set_pageset_high_and_batch(struct zone *zone)
{
unsigned long new_high, new_batch;
- struct per_cpu_pageset *p;
- int cpu;
if (percpu_pagelist_fraction) {
new_high = zone_managed_pages(zone) / percpu_pagelist_fraction;
@@ -6344,10 +6375,7 @@ static void zone_set_pageset_high_and_batch(struct zone *zone)
zone->pageset_high = new_high;
zone->pageset_batch = new_batch;
- for_each_possible_cpu(cpu) {
- p = per_cpu_ptr(zone->pageset, cpu);
- pageset_update(&p->pcp, new_high, new_batch);
- }
+ __zone_set_pageset_high_and_batch(zone, new_high, new_batch);
}
void __meminit setup_zone_pageset(struct zone *zone)
@@ -8742,6 +8770,27 @@ void __meminit zone_pcp_update(struct zone *zone)
mutex_unlock(&pcp_batch_high_lock);
}
+/*
+ * Effectively disable pcplists for the zone by setting the high limit to 0
+ * and draining all cpus. A concurrent page freeing on another CPU that's about
+ * to put the page on pcplist will either finish before the drain and the page
+ * will be drained, or observe the new high limit and skip the pcplist.
+ *
+ * Must be paired with a call to zone_pcp_enable().
+ */
+void zone_pcp_disable(struct zone *zone)
+{
+ mutex_lock(&pcp_batch_high_lock);
+ __zone_set_pageset_high_and_batch(zone, 0, 1);
+ __drain_all_pages(zone, true);
+}
+
+void zone_pcp_enable(struct zone *zone)
+{
+ __zone_set_pageset_high_and_batch(zone, zone->pageset_high, zone->pageset_batch);
+ mutex_unlock(&pcp_batch_high_lock);
+}
+
void zone_pcp_reset(struct zone *zone)
{
unsigned long flags;
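
A minimal caller-side sketch of how the new interface fits together, appended for illustration only (it is not part of the diff above). zone_pcp_disable(), zone_pcp_enable() and the forced __drain_all_pages(zone, true) they rely on come from the hunks above; example_isolate_range() and do_isolation_work() are hypothetical stand-ins for a caller such as memory offlining or alloc_contig_range() that must not race with pages sitting on pcplists.

/*
 * Hypothetical caller (illustration only): bracket work that must not
 * race with pcplist frees between zone_pcp_disable()/zone_pcp_enable().
 */
static int example_isolate_range(struct zone *zone,
				 unsigned long start_pfn,
				 unsigned long end_pfn)
{
	int ret;

	/*
	 * Sets pcp->high to 0 and pcp->batch to 1 on every CPU, then does a
	 * forced __drain_all_pages(zone, true): a concurrent free either
	 * completes before the drain or sees high == 0 and bypasses the
	 * pcplist, so no page can linger there while the range is processed.
	 */
	zone_pcp_disable(zone);

	ret = do_isolation_work(zone, start_pfn, end_pfn);	/* placeholder */

	/* Restores the saved zone->pageset_high / zone->pageset_batch. */
	zone_pcp_enable(zone);

	return ret;
}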