-rw-r--r--	mm/page_alloc.c	22
1 file changed, 16 insertions, 6 deletions
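In short: the per-cpu drain work item is wrapped in a new struct pcpu_drain that also carries a zone pointer, so the workqueue callback drain_local_pages_wq() can recover the requested zone with container_of() and drain only that zone, instead of calling drain_local_pages(NULL) and draining every zone. A minimal sketch of the container_of() idiom follows the diff.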
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 2cd1f9bb1b52..75865e1325b5 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -97,8 +97,12 @@ int _node_numa_mem_[MAX_NUMNODES];
#endif

/* work_structs for global per-cpu drains */
+struct pcpu_drain {
+ struct zone *zone;
+ struct work_struct work;
+};
DEFINE_MUTEX(pcpu_drain_mutex);
-DEFINE_PER_CPU(struct work_struct, pcpu_drain);
+DEFINE_PER_CPU(struct pcpu_drain, pcpu_drain);

#ifdef CONFIG_GCC_PLUGIN_LATENT_ENTROPY
volatile unsigned long latent_entropy __latent_entropy;
@@ -2658,6 +2662,10 @@ void drain_local_pages(struct zone *zone)

static void drain_local_pages_wq(struct work_struct *work)
{
+ struct pcpu_drain *drain;
+
+ drain = container_of(work, struct pcpu_drain, work);
+
/*
* drain_all_pages doesn't use proper cpu hotplug protection so
* we can race with cpu offline when the WQ can move this from
@@ -2666,7 +2674,7 @@ static void drain_local_pages_wq(struct work_struct *work)
* a different one.
*/
preempt_disable();
- drain_local_pages(NULL);
+ drain_local_pages(drain->zone);
preempt_enable();
}

@@ -2737,12 +2745,14 @@ void drain_all_pages(struct zone *zone)
}

for_each_cpu(cpu, &cpus_with_pcps) {
- struct work_struct *work = per_cpu_ptr(&pcpu_drain, cpu);
- INIT_WORK(work, drain_local_pages_wq);
- queue_work_on(cpu, mm_percpu_wq, work);
+ struct pcpu_drain *drain = per_cpu_ptr(&pcpu_drain, cpu);
+
+ drain->zone = zone;
+ INIT_WORK(&drain->work, drain_local_pages_wq);
+ queue_work_on(cpu, mm_percpu_wq, &drain->work);
}
for_each_cpu(cpu, &cpus_with_pcps)
- flush_work(per_cpu_ptr(&pcpu_drain, cpu));
+ flush_work(&per_cpu_ptr(&pcpu_drain, cpu)->work);

mutex_unlock(&pcpu_drain_mutex);
}
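The change relies on a standard kernel idiom: embed the generic struct work_struct inside a wrapper that carries extra payload (here, the zone), queue only the embedded member, and recover the wrapper in the callback via container_of(). What follows is a minimal userspace sketch of that idiom, not kernel code: work_struct and zone are stand-in types, the names mirror the patch for readability, and container_of() is re-derived from offsetof() rather than taken from the kernel headers.

#include <stdio.h>
#include <stddef.h>

/* Userspace stand-in for the kernel's container_of() macro. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Stand-ins for the kernel's struct work_struct and struct zone. */
struct work_struct { int pending; };
struct zone { const char *name; };

/* Mirrors struct pcpu_drain from the patch: payload plus embedded work item. */
struct pcpu_drain {
	struct zone *zone;
	struct work_struct work;
};

/* The callback receives only the embedded member, as drain_local_pages_wq()
 * does, and steps back to the enclosing struct to find its payload. */
static void drain_local_pages_wq(struct work_struct *work)
{
	struct pcpu_drain *drain = container_of(work, struct pcpu_drain, work);

	printf("draining zone: %s\n", drain->zone ? drain->zone->name : "(all)");
}

int main(void)
{
	struct zone normal = { .name = "Normal" };
	struct pcpu_drain drain = { .zone = &normal };

	/* The queueing side stores the payload, then hands out only &drain.work,
	 * just as drain_all_pages() does before queue_work_on(). */
	drain_local_pages_wq(&drain.work);
	return 0;
}

Note also the two-loop shape drain_all_pages() keeps after the patch: the first for_each_cpu() stores the zone and queues one work item per CPU, and only the second loop calls flush_work(), so the per-CPU drains can proceed in parallel before the caller waits on them.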