Diffstat (limited to 'lib/percpu_counter.c')
 -rw-r--r--  lib/percpu_counter.c | 15 ++++++++++++---
 1 file changed, 12 insertions(+), 3 deletions(-)
diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c
index dba56c5c1837..0e096311e0c0 100644
--- a/lib/percpu_counter.c
+++ b/lib/percpu_counter.c
@@ -131,7 +131,7 @@ static s64 __percpu_counter_sum_mask(struct percpu_counter *fbc,
 	raw_spin_lock_irqsave(&fbc->lock, flags);
 	ret = fbc->count;
-	for_each_cpu(cpu, cpu_mask) {
+	for_each_cpu_or(cpu, cpu_online_mask, cpu_mask) {
 		s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
 		ret += *pcount;
 	}
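
For context on the new iterator: for_each_cpu_or(cpu, mask1, mask2) visits every CPU set in the bitwise OR of the two masks, so the summation loop above now walks the online CPUs in addition to whatever mask the caller passes in. A minimal user-space model of that union iteration, with plain unsigned long bitmaps standing in for struct cpumask and the macro name invented for illustration:

#include <stdio.h>

/*
 * Toy stand-in for for_each_cpu_or(): visit every "cpu" bit that is set
 * in either of the two bitmaps.
 */
#define for_each_bit_or(cpu, m1, m2, nbits)				\
	for ((cpu) = 0; (cpu) < (nbits); (cpu)++)			\
		if ((((m1) | (m2)) >> (cpu)) & 1UL)

int main(void)
{
	unsigned long online = 0x0bUL;	/* CPUs 0, 1, 3 */
	unsigned long extra  = 0x04UL;	/* caller-supplied mask: CPU 2 */
	int cpu;

	/* visits CPUs 0, 1, 2, 3 -- the union of the two masks */
	for_each_bit_or(cpu, online, extra, 8)
		printf("add per-cpu count of cpu %d\n", cpu);

	return 0;
}

Why the union matters becomes clear in the second hunk below, where the caller passes cpu_dying_mask.
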
@@ -141,11 +141,20 @@ static s64 __percpu_counter_sum_mask(struct percpu_counter *fbc,
 /*
  * Add up all the per-cpu counts, return the result. This is a more accurate
- * but much slower version of percpu_counter_read_positive()
+ * but much slower version of percpu_counter_read_positive().
+ *
+ * We use the cpu mask of (cpu_online_mask | cpu_dying_mask) to capture sums
+ * from CPUs that are in the process of being taken offline. Dying cpus have
+ * been removed from the online mask, but may not have had the hotplug dead
+ * notifier called to fold the percpu count back into the global counter sum.
+ * By including dying CPUs in the iteration mask, we avoid this race condition
+ * so __percpu_counter_sum() just does the right thing when CPUs are being taken
+ * offline.
  */
 s64 __percpu_counter_sum(struct percpu_counter *fbc)
 {
-	return __percpu_counter_sum_mask(fbc, cpu_online_mask);
+
+	return __percpu_counter_sum_mask(fbc, cpu_dying_mask);
 }
 EXPORT_SYMBOL(__percpu_counter_sum);
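
The race the new comment describes can be seen in a simplified user-space model; the sketch below is illustrative only. A global count plus per-CPU deltas stands in for struct percpu_counter, and cpu_dead() plays the role of the hotplug dead callback (percpu_counter_cpu_dead() in this file) that folds a dead CPU's delta back into the global count. In the window where a CPU has already been cleared from the online mask but the fold has not yet run, a sum over online CPUs alone under-counts, while a sum over online | dying does not.

#include <stdio.h>

#define NR_CPUS 4

static long global_count;		 /* fbc->count in the kernel model     */
static long pcpu[NR_CPUS];		 /* per-cpu deltas (fbc->counters)     */
static unsigned long online_mask = 0x0f; /* all four CPUs online               */
static unsigned long dying_mask;	 /* CPUs partway through going offline */

/* model of the hotplug dead callback: fold the dead CPU's delta back */
static void cpu_dead(int cpu)
{
	global_count += pcpu[cpu];
	pcpu[cpu] = 0;
	dying_mask &= ~(1UL << cpu);
}

/* model of __percpu_counter_sum_mask(): global count plus masked deltas */
static long sum(unsigned long mask)
{
	long ret = global_count;

	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		if ((mask >> cpu) & 1UL)
			ret += pcpu[cpu];
	return ret;
}

int main(void)
{
	pcpu[0] = 5; pcpu[1] = 7; pcpu[2] = 3; pcpu[3] = 1;	/* true total: 16 */

	/* CPU 2 starts going offline: dropped from the online mask, added to
	 * the dying mask, but cpu_dead(2) has not run yet. */
	online_mask &= ~(1UL << 2);
	dying_mask  |=  (1UL << 2);

	printf("online only : %ld\n", sum(online_mask));		/* 13, misses CPU 2 */
	printf("online|dying: %ld\n", sum(online_mask | dying_mask));	/* 16, correct      */

	cpu_dead(2);
	printf("after fold  : %ld\n", sum(online_mask));		/* 16 again         */
	return 0;
}

Iterating for_each_cpu_or(cpu, cpu_online_mask, cpu_dying_mask) inside __percpu_counter_sum_mask() is how the patch takes the second, race-free form of that sum.
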