author		Hugh Dickins <hughd@google.com>	2023-09-29 20:42:45 -0700
committer	Andrew Morton <akpm@linux-foundation.org>	2023-10-18 14:34:14 -0700
commit		beb9868628445306958fd7b2da1cd369a4a381cc (patch)
tree		52d12a5752afdbde62d0f55ed3230c1eac56864b /lib/percpu_counter.c
parent		3022fd7af9604d44ec43da8a4398872989599b18 (diff)
shmem,percpu_counter: add _limited_add(fbc, limit, amount)
Percpu counter's compare and add are separate functions: without locking
around them (which would defeat their purpose), it has been possible to
overflow the intended limit. Imagine all the other CPUs fallocating tmpfs
huge pages to the limit, in between this CPU's compare and its add.

I have not seen reports of that happening; but tmpfs's recent addition of
dquot_alloc_block_nodirty() in between the compare and the add makes it
even more likely, and I'd be uncomfortable to leave it unfixed.

Introduce percpu_counter_limited_add(fbc, limit, amount) to prevent it.

I believe this implementation is correct, and slightly more efficient than
the combination of compare and add (taking the lock once rather than twice
when nearing full - the last 128MiB of a tmpfs volume on a machine with
128 CPUs and 4KiB pages); but it does beg for a better design - when
nearing full, there is no new batching, but the costly percpu counter sum
across CPUs still has to be done, while locked.

Follow __percpu_counter_sum()'s example, including cpu_dying_mask as well
as cpu_online_mask: but shouldn't __percpu_counter_compare() and
__percpu_counter_limited_add() then be adding a num_dying_cpus() to
num_online_cpus(), when they calculate the maximum which could be held
across CPUs? But the times when it matters would be vanishingly rare.

Link: https://lkml.kernel.org/r/bb817848-2d19-bcc8-39ca-ea179af0f0b4@google.com
Signed-off-by: Hugh Dickins <hughd@google.com>
Reviewed-by: Jan Kara <jack@suse.cz>
Cc: Tim Chen <tim.c.chen@intel.com>
Cc: Dave Chinner <dchinner@redhat.com>
Cc: Darrick J. Wong <djwong@kernel.org>
Cc: Axel Rasmussen <axelrasmussen@google.com>
Cc: Carlos Maiolino <cem@kernel.org>
Cc: Christian Brauner <brauner@kernel.org>
Cc: Chuck Lever <chuck.lever@oracle.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
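[Editor's note] To make the race concrete, here is a sketch of the
caller-side pattern (not part of this commit; the sbinfo->used_blocks and
sbinfo->max_blocks names are borrowed from mm/shmem.c for illustration):

	/* Before: racy. Another CPU can add between the compare and the
	 * add, pushing used_blocks past max_blocks.
	 */
	if (percpu_counter_compare(&sbinfo->used_blocks,
				   sbinfo->max_blocks - pages) > 0)
		return -ENOSPC;
	percpu_counter_add(&sbinfo->used_blocks, pages);

	/* After: the compare and the add happen in one critical section. */
	if (!percpu_counter_limited_add(&sbinfo->used_blocks,
					sbinfo->max_blocks, pages))
		return -ENOSPC;

The "last 128MiB" figure above follows from the default batch:
compute_batch_value() sets percpu_counter_batch to max(32, 2 *
num_online_cpus()), i.e. 256 on a 128-CPU machine, so the worst-case
drift ("unknown" in the code below) is 256 * 128 = 32768 pages, which is
128MiB at 4KiB per page; only within that distance of the limit does the
locked slow path run.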
Diffstat (limited to 'lib/percpu_counter.c')
-rw-r--r--	lib/percpu_counter.c	53
1 file changed, 53 insertions(+), 0 deletions(-)
diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c
index 9073430dc865..58a3392f471b 100644
--- a/lib/percpu_counter.c
+++ b/lib/percpu_counter.c
@@ -278,6 +278,59 @@ int __percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch)
}
EXPORT_SYMBOL(__percpu_counter_compare);
+/*
+ * Compare counter, and add amount if the total is within limit.
+ * Return true if amount was added, false if it would exceed limit.
+ */
+bool __percpu_counter_limited_add(struct percpu_counter *fbc,
+				  s64 limit, s64 amount, s32 batch)
+{
+	s64 count;
+	s64 unknown;
+	unsigned long flags;
+	bool good;
+
+	if (amount > limit)
+		return false;
+
+	local_irq_save(flags);
+	unknown = batch * num_online_cpus();
+	count = __this_cpu_read(*fbc->counters);
+
+	/* Skip taking the lock when safe */
+	if (abs(count + amount) <= batch &&
+	    fbc->count + unknown <= limit) {
+		this_cpu_add(*fbc->counters, amount);
+		local_irq_restore(flags);
+		return true;
+	}
+
+	raw_spin_lock(&fbc->lock);
+	count = fbc->count + amount;
+
+	/* Skip percpu_counter_sum() when safe */
+	if (count + unknown > limit) {
+		s32 *pcount;
+		int cpu;
+
+		for_each_cpu_or(cpu, cpu_online_mask, cpu_dying_mask) {
+			pcount = per_cpu_ptr(fbc->counters, cpu);
+			count += *pcount;
+		}
+	}
+
+	good = count <= limit;
+	if (good) {
+		count = __this_cpu_read(*fbc->counters);
+		fbc->count += count + amount;
+		__this_cpu_sub(*fbc->counters, count);
+	}
+
+	raw_spin_unlock(&fbc->lock);
+	local_irq_restore(flags);
+	return good;
+}
+
static int __init percpu_counter_startup(void)
{
int ret;
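
[Editor's note] This diff touches only lib/percpu_counter.c, so the
unprefixed percpu_counter_limited_add(fbc, limit, amount) named in the
subject is not shown here. Assuming it mirrors the way percpu_counter_add()
wraps percpu_counter_add_batch() in include/linux/percpu_counter.h, the
header-side wrapper would look something like this sketch:

	/* Hypothetical wrapper (not reproduced from this diff): supplies
	 * the default percpu_counter_batch, so callers pass only the
	 * counter, the limit and the amount, and treat a false return as
	 * "adding amount would exceed limit".
	 */
	static inline bool
	percpu_counter_limited_add(struct percpu_counter *fbc,
				   s64 limit, s64 amount)
	{
		return __percpu_counter_limited_add(fbc, limit, amount,
						    percpu_counter_batch);
	}

A caller that maps a false return to -ENOSPC gets the compare and the add
performed under a single irq-disabled critical section, which is what
closes the overflow window described in the commit message.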