path: root/kernel/ucount.c
author		Alexey Gladkov <legion@kernel.org>	2021-04-22 14:27:10 +0200
committer	Eric W. Biederman <ebiederm@xmission.com>	2021-04-30 14:14:01 -0500
commit		b6c336528926ef73b0f70260f2636de2c3b94c14 (patch)
tree		2bf2101c651ec8bb6de80cf738b0cf76a7c40374 /kernel/ucount.c
parent		905ae01c4ae2ae3df05bb141801b1db4b7d83c61 (diff)
Use atomic_t for ucounts reference counting
The current implementation of the ucounts reference counter requires the use of spin_lock. We're going to use get_ucounts() in more performance-critical areas, such as the handling of RLIMIT_SIGPENDING.

Now we need to take the spin_lock only if we want to change the hashtable.

v10:
* Always try to put ucounts in case we cannot increase ucounts->count. This covers the case when all consumers return ucounts at once.

v9:
* Use a negative value to check that ucounts->count is close to overflow.

Signed-off-by: Alexey Gladkov <legion@kernel.org>
Link: https://lkml.kernel.org/r/94d1dbecab060a6b116b0a2d1accd8ca1bbb4f5f.1619094428.git.legion@kernel.org
Signed-off-by: Eric W. Biederman <ebiederm@xmission.com>
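For illustration, here is a minimal userspace sketch of the overflow check the patch adopts. It is a model only: the names add_negative and sketch_get and the standalone counter are hypothetical, and C11 atomics stand in for the kernel's atomic_t API. The point it demonstrates is that atomic_add_negative(1, &count) both takes the reference and detects saturation in a single atomic operation: once the counter climbs past INT_MAX and wraps to a negative value, every subsequent get observes a negative result and backs out.

/*
 * Hypothetical userspace model of the patch's reference-counting scheme.
 * Build with: cc -std=c11 sketch.c
 */
#include <limits.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int count = 1;

/* Models kernel atomic_add_negative(i, v): add i to *v, return true if
 * the result is negative. Unsigned math keeps the wraparound
 * well-defined in plain C. */
static bool add_negative(atomic_int *v, int i)
{
	unsigned int old = (unsigned int)atomic_fetch_add(v, i);
	return (int)(old + (unsigned int)i) < 0;
}

/* Models get_ucounts(): refuse new references once the counter saturates. */
static bool sketch_get(void)
{
	if (add_negative(&count, 1)) {
		/* Back out. The real patch calls put_ucounts() here (the
		 * v10 change), so a racing final reference is still freed. */
		atomic_fetch_sub(&count, 1);
		return false;
	}
	return true;
}

int main(void)
{
	printf("normal get: %s\n", sketch_get() ? "ok" : "refused");
	atomic_store(&count, INT_MAX);
	printf("get at INT_MAX: %s\n", sketch_get() ? "ok" : "refused");
	return 0;
}

The sign-bit test replaces the old "== INT_MAX" comparison because, without the spin_lock, the increment itself has to be the detection: the negative range leaves roughly INT_MAX of headroom, so concurrent increments cannot race the counter back around to a plausible positive value.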
Diffstat (limited to 'kernel/ucount.c')
-rw-r--r--	kernel/ucount.c	| 53
1 file changed, 19 insertions(+), 34 deletions(-)
diff --git a/kernel/ucount.c b/kernel/ucount.c
index 50cc1dfb7d28..365865f368ec 100644
--- a/kernel/ucount.c
+++ b/kernel/ucount.c
@@ -11,7 +11,7 @@
 struct ucounts init_ucounts = {
 	.ns    = &init_user_ns,
 	.uid   = GLOBAL_ROOT_UID,
-	.count = 1,
+	.count = ATOMIC_INIT(1),
 };
 
 #define UCOUNTS_HASHTABLE_BITS 10
@@ -139,6 +139,15 @@ static void hlist_add_ucounts(struct ucounts *ucounts)
 	spin_unlock_irq(&ucounts_lock);
 }
 
+struct ucounts *get_ucounts(struct ucounts *ucounts)
+{
+	if (ucounts && atomic_add_negative(1, &ucounts->count)) {
+		put_ucounts(ucounts);
+		ucounts = NULL;
+	}
+	return ucounts;
+}
+
 struct ucounts *alloc_ucounts(struct user_namespace *ns, kuid_t uid)
 {
 	struct hlist_head *hashent = ucounts_hashentry(ns, uid);
@@ -155,7 +164,7 @@ struct ucounts *alloc_ucounts(struct user_namespace *ns, kuid_t uid)
 
 		new->ns = ns;
 		new->uid = uid;
-		new->count = 0;
+		atomic_set(&new->count, 1);
 
 		spin_lock_irq(&ucounts_lock);
 		ucounts = find_ucounts(ns, uid, hashent);
@@ -163,33 +172,12 @@ struct ucounts *alloc_ucounts(struct user_namespace *ns, kuid_t uid)
 			kfree(new);
 		} else {
 			hlist_add_head(&new->node, hashent);
-			ucounts = new;
+			spin_unlock_irq(&ucounts_lock);
+			return new;
 		}
 	}
-	if (ucounts->count == INT_MAX)
-		ucounts = NULL;
-	else
-		ucounts->count += 1;
 	spin_unlock_irq(&ucounts_lock);
-	return ucounts;
-}
-
-struct ucounts *get_ucounts(struct ucounts *ucounts)
-{
-	unsigned long flags;
-
-	if (!ucounts)
-		return NULL;
-
-	spin_lock_irqsave(&ucounts_lock, flags);
-	if (ucounts->count == INT_MAX) {
-		WARN_ONCE(1, "ucounts: counter has reached its maximum value");
-		ucounts = NULL;
-	} else {
-		ucounts->count += 1;
-	}
-	spin_unlock_irqrestore(&ucounts_lock, flags);
-
+	ucounts = get_ucounts(ucounts);
 	return ucounts;
 }
 
@@ -197,15 +185,12 @@ void put_ucounts(struct ucounts *ucounts)
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&ucounts_lock, flags);
-	ucounts->count -= 1;
-	if (!ucounts->count)
+	if (atomic_dec_and_test(&ucounts->count)) {
+		spin_lock_irqsave(&ucounts_lock, flags);
 		hlist_del_init(&ucounts->node);
-	else
-		ucounts = NULL;
-	spin_unlock_irqrestore(&ucounts_lock, flags);
-
-	kfree(ucounts);
+		spin_unlock_irqrestore(&ucounts_lock, flags);
+		kfree(ucounts);
+	}
 }
 
 static inline bool atomic_long_inc_below(atomic_long_t *v, int u)
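For context, this is the shape of a hot-path caller after the change (a hypothetical sketch, not code from this patch; the function name charge_resource and the -EAGAIN error handling are illustrative):

/* Hypothetical caller: charge a resource against a user's ucounts
 * on a hot path, without taking ucounts_lock. */
static int charge_resource(struct ucounts *uc)
{
	uc = get_ucounts(uc);	/* lock-free; NULL if the counter saturated */
	if (!uc)
		return -EAGAIN;	/* caller must cope with a refused reference */

	/* ... account the resource against uc ... */

	put_ucounts(uc);	/* the final put takes ucounts_lock once,
				 * unhashes the entry and frees it */
	return 0;
}

The spin_lock is now confined to hashtable mutation: insertion in alloc_ucounts() and removal on the final put_ucounts().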