author     Mel Gorman <mgorman@techsingularity.net>  2017-02-24 14:56:56 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>  2017-02-24 17:46:54 -0800
commit     bd233f538d51c2cae6f0bfc2cf7f0960e1683b8a (patch)
tree       79b96403eb3dfa35dfefc8f8acfd2e1d08e324f0
parent     5104782011a12b04fe9cfaa6f1085bdcdedd79c4 (diff)
mm, page_alloc: use static global work_struct for draining per-cpu pages
As suggested by Vlastimil Babka and Tejun Heo, this patch uses a static
work_struct to co-ordinate the draining of per-cpu pages on the
workqueue. Only one task can drain at a time but this is better than the
previous scheme that allowed multiple tasks to send IPIs at a time.

One consideration is whether parallel requests should synchronise
against each other. This patch does not synchronise for a global drain
as the common case for such callers is expected to be multiple parallel
direct reclaimers competing for pages when the watermark is close to
min. Draining the per-cpu list is unlikely to make much progress and
serialising the drain is of dubious merit. Drains are synchronised for
callers such as memory hotplug and CMA that care about the drain being
complete when the function returns.

Link: http://lkml.kernel.org/r/20170125083038.rzb5f43nptmk7aed@techsingularity.net
Signed-off-by: Mel Gorman <mgorman@techsingularity.net>
Suggested-by: Tejun Heo <tj@kernel.org>
Suggested-by: Vlastimil Babka <vbabka@suse.cz>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Acked-by: Hillf Danton <hillf.zj@alibaba-inc.com>
Cc: Jesper Dangaard Brouer <brouer@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--  mm/page_alloc.c | 40
1 file changed, 22 insertions(+), 18 deletions(-)
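The coordination described in the changelog amounts to: try the lock;
if another drain is already running, either give up (global drain) or
wait for a turn (zone-specific drain). Below is a minimal user-space
sketch of that pattern, with pthreads standing in for the kernel mutex
API; drain_all(), do_drain() and must_complete are illustrative names
and the drain body is a stub, not kernel code.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t drain_mutex = PTHREAD_MUTEX_INITIALIZER;

static void do_drain(void)
{
        /* stand-in for queueing and flushing the per-cpu work items */
        puts("draining per-cpu lists");
}

static void drain_all(bool must_complete)
{
        /*
         * Mirrors the drain_all_pages() logic: a global caller
         * (zone == NULL in the kernel, must_complete == false here)
         * backs off if a drain is already in flight, since the drain in
         * progress frees pages anyway; a zone-specific caller
         * (hotplug/CMA) blocks so the drain is complete on return.
         */
        if (pthread_mutex_trylock(&drain_mutex) != 0) {
                if (!must_complete)
                        return;
                pthread_mutex_lock(&drain_mutex);
        }

        do_drain();
        pthread_mutex_unlock(&drain_mutex);
}

int main(void)
{
        drain_all(false);       /* direct-reclaim style: may no-op */
        drain_all(true);        /* hotplug/CMA style: always drains */
        return 0;
}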
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 46c30fa26acd..41985aa4672d 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -92,6 +92,10 @@ EXPORT_PER_CPU_SYMBOL(_numa_mem_);
 int _node_numa_mem_[MAX_NUMNODES];
 #endif
 
+/* work_structs for global per-cpu drains */
+DEFINE_MUTEX(pcpu_drain_mutex);
+DEFINE_PER_CPU(struct work_struct, pcpu_drain);
+
 #ifdef CONFIG_GCC_PLUGIN_LATENT_ENTROPY
 volatile unsigned long latent_entropy __latent_entropy;
 EXPORT_SYMBOL(latent_entropy);
@@ -2360,7 +2364,6 @@ static void drain_local_pages_wq(struct work_struct *work)
  */
 void drain_all_pages(struct zone *zone)
 {
-        struct work_struct __percpu *works;
         int cpu;
 
         /*
@@ -2373,7 +2376,16 @@ void drain_all_pages(struct zone *zone)
         if (current->flags & PF_WQ_WORKER)
                 return;
 
-        works = alloc_percpu_gfp(struct work_struct, GFP_ATOMIC);
+        /*
+         * Do not drain if one is already in progress unless it's specific to
+         * a zone. Such callers are primarily CMA and memory hotplug and need
+         * the drain to be complete when the call returns.
+         */
+        if (unlikely(!mutex_trylock(&pcpu_drain_mutex))) {
+                if (!zone)
+                        return;
+                mutex_lock(&pcpu_drain_mutex);
+        }
 
         /*
          * We don't care about racing with CPU hotplug event
@@ -2406,23 +2418,15 @@ void drain_all_pages(struct zone *zone)
                         cpumask_clear_cpu(cpu, &cpus_with_pcps);
         }
 
-        if (works) {
-                for_each_cpu(cpu, &cpus_with_pcps) {
-                        struct work_struct *work = per_cpu_ptr(works, cpu);
-                        INIT_WORK(work, drain_local_pages_wq);
-                        schedule_work_on(cpu, work);
-                }
-                for_each_cpu(cpu, &cpus_with_pcps)
-                        flush_work(per_cpu_ptr(works, cpu));
-        } else {
-                for_each_cpu(cpu, &cpus_with_pcps) {
-                        struct work_struct work;
-
-                        INIT_WORK(&work, drain_local_pages_wq);
-                        schedule_work_on(cpu, &work);
-                        flush_work(&work);
-                }
+        for_each_cpu(cpu, &cpus_with_pcps) {
+                struct work_struct *work = per_cpu_ptr(&pcpu_drain, cpu);
+                INIT_WORK(work, drain_local_pages_wq);
+                schedule_work_on(cpu, work);
         }
+        for_each_cpu(cpu, &cpus_with_pcps)
+                flush_work(per_cpu_ptr(&pcpu_drain, cpu));
+
+        mutex_unlock(&pcpu_drain_mutex);
 }
 
 #ifdef CONFIG_HIBERNATION
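The final hunk leaves a single queue-then-flush pair of loops: every
work item is scheduled first, then each is flushed, so per-cpu drains
still run in parallel rather than one CPU at a time (the old fallback
when the GFP_ATOMIC allocation failed). A rough user-space analogue of
that pattern, with one thread per CPU standing in for
schedule_work_on() and flush_work(); NCPUS and drain_local() are
illustrative names, not kernel code.

#include <pthread.h>
#include <stdio.h>

#define NCPUS 4

static void *drain_local(void *arg)
{
        /* stand-in for drain_local_pages_wq() running on one CPU */
        printf("draining cpu %ld\n", (long)arg);
        return NULL;
}

int main(void)
{
        pthread_t workers[NCPUS];
        long cpu;

        /* "schedule_work_on": start every drain before waiting... */
        for (cpu = 0; cpu < NCPUS; cpu++)
                pthread_create(&workers[cpu], NULL, drain_local, (void *)cpu);

        /* ..."flush_work": then wait for each one to finish */
        for (cpu = 0; cpu < NCPUS; cpu++)
                pthread_join(workers[cpu], NULL);

        return 0;
}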