path: root/kernel/ucount.c
author:    Eric W. Biederman <ebiederm@xmission.com>  2021-10-16 14:05:34 -0500
committer: Eric W. Biederman <ebiederm@xmission.com>  2021-10-20 10:45:34 -0500
commit:    da70d3109e74adf6ae9f8d1f6c7c0c9735d314ba
tree:      dd7eb1eef0114f31f39de85906ad9ebae955fe37 /kernel/ucount.c
parent:    5fc9e37cd5ae762b856912cdedb226a884b3c3e0
ucounts: Add get_ucounts_or_wrap for clarity
Add a helper function, get_ucounts_or_wrap, that is a trivial wrapper around atomic_add_negative and makes it clear how atomic_add_negative is used in the context of ucounts.

Link: https://lkml.kernel.org/r/87pms2qkr9.fsf_-_@disp2133
Tested-by: Yu Zhao <yuzhao@google.com>
Reviewed-by: Alexey Gladkov <legion@kernel.org>
Signed-off-by: "Eric W. Biederman" <ebiederm@xmission.com>
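For readers less familiar with the idiom, here is a minimal userspace sketch of the same wrap check. It is illustrative only: it uses C11 <stdatomic.h> in place of the kernel's atomic_t, the demo_* names are hypothetical stand-ins for the kernel helpers, and the unsigned addition is only there to avoid signed-overflow undefined behavior outside the kernel.

#include <stdatomic.h>
#include <stdbool.h>

/* Hypothetical stand-in for struct ucounts; only the refcount matters. */
struct demo_ucounts {
	atomic_int count;	/* starts at 1 for the initial reference */
};

/*
 * Userspace analogue of get_ucounts_or_wrap(): take a reference and
 * report whether the counter stayed non-negative.  A negative result
 * means the count wrapped past INT_MAX and the get must be refused.
 */
static bool demo_get_or_wrap(struct demo_ucounts *ucounts)
{
	int old = atomic_fetch_add_explicit(&ucounts->count, 1,
					    memory_order_relaxed);
	/* Unsigned add avoids signed-overflow UB; the conversion back
	 * to int is two's-complement wrap on mainstream targets. */
	int new = (int)((unsigned int)old + 1u);

	return new >= 0;	/* mirrors !atomic_add_negative(1, ...) */
}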
Diffstat (limited to 'kernel/ucount.c')
-rw-r--r--	kernel/ucount.c	14
1 file changed, 10 insertions(+), 4 deletions(-)
diff --git a/kernel/ucount.c b/kernel/ucount.c
index 708d05164a7d..133b6044fda4 100644
--- a/kernel/ucount.c
+++ b/kernel/ucount.c
@@ -150,9 +150,15 @@ static void hlist_add_ucounts(struct ucounts *ucounts)
 	spin_unlock_irq(&ucounts_lock);
 }
 
+static inline bool get_ucounts_or_wrap(struct ucounts *ucounts)
+{
+	/* Returns true on a successful get, false if the count wraps. */
+	return !atomic_add_negative(1, &ucounts->count);
+}
+
 struct ucounts *get_ucounts(struct ucounts *ucounts)
 {
-	if (atomic_add_negative(1, &ucounts->count)) {
+	if (!get_ucounts_or_wrap(ucounts)) {
 		put_ucounts(ucounts);
 		ucounts = NULL;
 	}
@@ -163,7 +169,7 @@ struct ucounts *alloc_ucounts(struct user_namespace *ns, kuid_t uid)
 {
 	struct hlist_head *hashent = ucounts_hashentry(ns, uid);
 	struct ucounts *ucounts, *new;
-	long overflow;
+	bool wrapped;
 
 	spin_lock_irq(&ucounts_lock);
 	ucounts = find_ucounts(ns, uid, hashent);
@@ -188,9 +194,9 @@ struct ucounts *alloc_ucounts(struct user_namespace *ns, kuid_t uid)
 			return new;
 		}
 	}
-	overflow = atomic_add_negative(1, &ucounts->count);
+	wrapped = !get_ucounts_or_wrap(ucounts);
 	spin_unlock_irq(&ucounts_lock);
-	if (overflow) {
+	if (wrapped) {
 		put_ucounts(ucounts);
 		return NULL;
 	}
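
The two call sites above share one pattern: speculatively take the reference, and on wrap back it out and fail. Continuing the hypothetical demo_* sketch from the commit message:

#include <stddef.h>

/* Analogue of the count handling in put_ucounts(): drop one reference.
 * (The real put_ucounts() also frees the object once the count hits
 * zero; that part is omitted here.) */
static void demo_put(struct demo_ucounts *ucounts)
{
	atomic_fetch_sub_explicit(&ucounts->count, 1, memory_order_relaxed);
}

/* Mirrors get_ucounts() after this patch: on wrap, undo the
 * speculative increment and hand the caller NULL. */
struct demo_ucounts *demo_get(struct demo_ucounts *ucounts)
{
	if (!demo_get_or_wrap(ucounts)) {
		demo_put(ucounts);
		ucounts = NULL;
	}
	return ucounts;
}

Having the helper return true on success keeps both call sites readable: get_ucounts() tests !get_ucounts_or_wrap() directly, and alloc_ucounts() stores the negation in a plainly named bool instead of the old long overflow.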