author    Nikolay Borisov <nborisov@suse.com>       2017-06-20 21:01:20 +0300
committer Tejun Heo <tj@kernel.org>                 2017-06-20 15:42:32 -0400
commit    104b4e5139fe384431ac11c3b8a6cf4a529edf4a
tree      e8a0157f2294f006f31e535949327b3484a5dcb8  /include/net/inet_frag.h
parent    df95e795a722892a9e0603ce4b9b62fab9f02967
percpu_counter: Rename __percpu_counter_add to percpu_counter_add_batch
Currently, percpu_counter_add is a wrapper around __percpu_counter_add
which is preempt safe due to explicit calls to preempt_disable. Given
how the __ prefix is used in percpu related interfaces, the naming
unfortunately creates the false sense that __percpu_counter_add is less
safe than percpu_counter_add. In terms of context-safety they're
equivalent; the only difference is that the __ version takes a batch
parameter.

Make this a bit more explicit by just renaming __percpu_counter_add to
percpu_counter_add_batch.

This patch doesn't cause any functional changes.

tj: Minor updates to patch description for clarity. Cosmetic
    indentation updates.

Signed-off-by: Nikolay Borisov <nborisov@suse.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Chris Mason <clm@fb.com>
Cc: Josef Bacik <jbacik@fb.com>
Cc: David Sterba <dsterba@suse.com>
Cc: Darrick J. Wong <darrick.wong@oracle.com>
Cc: Jan Kara <jack@suse.com>
Cc: Jens Axboe <axboe@fb.com>
Cc: linux-mm@kvack.org
Cc: "David S. Miller" <davem@davemloft.net>
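For reference, the rename keeps percpu_counter_add as a thin wrapper that
supplies the default batch. A minimal sketch of that relationship, assuming
the declarations in include/linux/percpu_counter.h at the time of this patch
(exact prototypes may differ):

/* Sketch only, not part of this patch: the batch-taking primitive and the
 * convenience wrapper that passes the default percpu_counter_batch.
 */
void percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount, s32 batch);

static inline void percpu_counter_add(struct percpu_counter *fbc, s64 amount)
{
	percpu_counter_add_batch(fbc, amount, percpu_counter_batch);
}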
Diffstat (limited to 'include/net/inet_frag.h')
-rw-r--r--   include/net/inet_frag.h   4
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/include/net/inet_frag.h b/include/net/inet_frag.h
index 5894730ec82a..5932e6de8fc0 100644
--- a/include/net/inet_frag.h
+++ b/include/net/inet_frag.h
@@ -154,12 +154,12 @@ static inline int frag_mem_limit(struct netns_frags *nf)
 static inline void sub_frag_mem_limit(struct netns_frags *nf, int i)
 {
-	__percpu_counter_add(&nf->mem, -i, frag_percpu_counter_batch);
+	percpu_counter_add_batch(&nf->mem, -i, frag_percpu_counter_batch);
 }
 
 static inline void add_frag_mem_limit(struct netns_frags *nf, int i)
 {
-	__percpu_counter_add(&nf->mem, i, frag_percpu_counter_batch);
+	percpu_counter_add_batch(&nf->mem, i, frag_percpu_counter_batch);
 }
 
 static inline unsigned int sum_frag_mem_limit(struct netns_frags *nf)
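For context on what the batch argument (frag_percpu_counter_batch above)
controls: per-CPU deltas are only folded into the shared counter once they
reach the batch threshold, keeping the common case lock-free. A simplified
sketch of that batching logic, loosely based on lib/percpu_counter.c and not
part of this patch (the real implementation differs in locking details):

/* Simplified sketch of the batched add. */
void percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount, s32 batch)
{
	s64 count;

	preempt_disable();
	count = __this_cpu_read(*fbc->counters) + amount;
	if (count >= batch || count <= -batch) {
		/* Delta reached the batch threshold: fold it into fbc->count. */
		raw_spin_lock(&fbc->lock);
		fbc->count += count;
		__this_cpu_sub(*fbc->counters, count - amount);
		raw_spin_unlock(&fbc->lock);
	} else {
		/* Below the threshold: cheap per-CPU update, no lock taken. */
		this_cpu_add(*fbc->counters, amount);
	}
	preempt_enable();
}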