From 0a4954a850b0c4d0a5d18b1a55d6e5a653e362b5 Mon Sep 17 00:00:00 2001
From: Feng Tang
Date: Thu, 6 Aug 2020 23:23:11 -0700
Subject: percpu_counter: add percpu_counter_sync()

A percpu_counter's accuracy is related to its batch size: for a
percpu_counter with a big batch, its deviation can be big.  So when the
counter's batch is changed at runtime to a smaller value for better
accuracy, there can also be a requirement to reduce the big deviation
that has already accumulated.  Add a percpu_counter sync function, to be
run on each CPU, which folds that CPU's local count into the global
count.

Reported-by: kernel test robot
Signed-off-by: Feng Tang
Signed-off-by: Andrew Morton
Cc: Dennis Zhou
Cc: Tejun Heo
Cc: Christoph Lameter
Cc: Michal Hocko
Cc: Qian Cai
Cc: Andi Kleen
Cc: Huang Ying
Cc: Dave Hansen
Cc: Haiyang Zhang
Cc: Johannes Weiner
Cc: Kees Cook
Cc: "K. Y. Srinivasan"
Cc: Matthew Wilcox (Oracle)
Cc: Mel Gorman
Cc: Tim Chen
Link: http://lkml.kernel.org/r/1594389708-60781-4-git-send-email-feng.tang@intel.com
Signed-off-by: Linus Torvalds
---
 lib/percpu_counter.c | 19 +++++++++++++++++++
 1 file changed, 19 insertions(+)

diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c
index a66595ba5543..a2345de90e93 100644
--- a/lib/percpu_counter.c
+++ b/lib/percpu_counter.c
@@ -98,6 +98,25 @@ void percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount, s32 batch)
 }
 EXPORT_SYMBOL(percpu_counter_add_batch);
 
+/*
+ * For a percpu_counter with a big batch, the deviation of its count could
+ * be big, and there is a requirement to reduce that deviation, e.g. when
+ * the counter's batch is decreased at runtime for better accuracy, which
+ * can be achieved by running this sync function on each CPU.
+ */
+void percpu_counter_sync(struct percpu_counter *fbc)
+{
+	unsigned long flags;
+	s64 count;
+
+	raw_spin_lock_irqsave(&fbc->lock, flags);
+	count = __this_cpu_read(*fbc->counters);
+	fbc->count += count;
+	__this_cpu_sub(*fbc->counters, count);
+	raw_spin_unlock_irqrestore(&fbc->lock, flags);
+}
+EXPORT_SYMBOL(percpu_counter_sync);
+
 /*
  * Add up all the per-cpu counts, return the result. This is a more accurate
  * but much slower version of percpu_counter_read_positive()
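
Usage sketch (not part of the patch): since percpu_counter_sync() only folds
the calling CPU's local delta into fbc->count, a caller that has just lowered a
counter's batch would run it once per CPU.  The sketch below assumes a
hypothetical counter "my_counter" (initialized elsewhere with
percpu_counter_init()) and uses the existing workqueue helper
schedule_on_each_cpu(), which runs a work function once on every online CPU and
waits for completion.

	#include <linux/percpu_counter.h>
	#include <linux/workqueue.h>

	static struct percpu_counter my_counter;	/* percpu_counter_init()'d elsewhere */

	/* Runs on one CPU at a time; flushes only that CPU's local delta. */
	static void my_counter_sync_work(struct work_struct *dummy)
	{
		percpu_counter_sync(&my_counter);
	}

	/* Call after lowering the batch to squeeze out the accumulated deviation. */
	static void my_counter_reduce_deviation(void)
	{
		schedule_on_each_cpu(my_counter_sync_work);
	}

Because schedule_on_each_cpu() waits for all queued work items to finish, once
my_counter_reduce_deviation() returns, my_counter.count reflects every CPU's
contribution up to the point its sync work ran.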