author    Eric Dumazet <edumazet@google.com>    2025-03-08 17:48:36 +0100
committer Thomas Gleixner <tglx@linutronix.de>  2025-03-13 12:07:17 +0100
commit    feb864ee99a2d8a22800342388401f3a3b90d42b (patch)
tree      0995392e76f26df837664c08ca51cc940b9b0cac /kernel
parent    538d710ec74233f99dc0fd604d45a2b6143c8e2c (diff)
posix-timers: Make signal_struct::next_posix_timer_id an atomic_t
The global hash_lock protecting the posix timer hash table can be heavily contended, especially when there is an extensive linear search for a timer ID.

Timer IDs are handed out by monotonically increasing next_posix_timer_id and then validating that there is no timer with the same ID in the hash table. Both operations happen with the global hash lock held.

To reduce the hash lock contention, the hash will be reworked to a scaled hash with per-bucket locks, which requires handling the ID counter locklessly.

Prepare for this by making next_posix_timer_id an atomic_t, which can be used locklessly with atomic_inc_return().

[ tglx: Adopted from Eric's series, massaged change log and simplified it ]

Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Frederic Weisbecker <frederic@kernel.org>
Acked-by: Frederic Weisbecker <frederic@kernel.org>
Link: https://lore.kernel.org/all/20250219125522.2535263-2-edumazet@google.com
Link: https://lore.kernel.org/all/20250308155624.151545978@linutronix.de
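To make the counter handling concrete, here is a minimal standalone userspace sketch using C11 atomics (not the kernel implementation; the names next_id and alloc_candidate_id are invented for illustration). It shows the lockless hand-out the change log describes: fetch-and-increment a shared counter and mask the returned value into the positive int range, so the counter itself needs no lock.

#include <limits.h>
#include <stdatomic.h>
#include <stdio.h>

/* Illustrative stand-in for signal_struct::next_posix_timer_id */
static atomic_uint next_id;

/* Hand out the next candidate timer ID, clamped to [0, INT_MAX] */
static unsigned int alloc_candidate_id(void)
{
	/*
	 * atomic_fetch_add() returns the previous value, analogous to the
	 * kernel's atomic_fetch_inc(); the mask keeps the ID in the
	 * positive int space even after the counter wraps around.
	 */
	return atomic_fetch_add(&next_id, 1) & INT_MAX;
}

int main(void)
{
	for (int i = 0; i < 4; i++)
		printf("candidate id: %u\n", alloc_candidate_id());
	return 0;
}

In the kernel code below, only this counter update becomes lockless; hash_lock is still taken afterwards to verify that the candidate ID does not collide with an existing timer in the hash table.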
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/time/posix-timers.c  14
1 file changed, 5 insertions(+), 9 deletions(-)
diff --git a/kernel/time/posix-timers.c b/kernel/time/posix-timers.c
index 991d12abae45..f9a70c1373b6 100644
--- a/kernel/time/posix-timers.c
+++ b/kernel/time/posix-timers.c
@@ -119,21 +119,17 @@ static bool posix_timer_hashed(struct hlist_head *head, struct signal_struct *si
 static int posix_timer_add(struct k_itimer *timer)
 {
 	struct signal_struct *sig = current->signal;
-	struct hlist_head *head;
-	unsigned int cnt, id;
 
 	/*
 	 * FIXME: Replace this by a per signal struct xarray once there is
 	 * a plan to handle the resulting CRIU regression gracefully.
 	 */
-	for (cnt = 0; cnt <= INT_MAX; cnt++) {
-		spin_lock(&hash_lock);
-		id = sig->next_posix_timer_id;
-
-		/* Write the next ID back. Clamp it to the positive space */
-		sig->next_posix_timer_id = (id + 1) & INT_MAX;
+	for (unsigned int cnt = 0; cnt <= INT_MAX; cnt++) {
+		/* Get the next timer ID and clamp it to positive space */
+		unsigned int id = atomic_fetch_inc(&sig->next_posix_timer_id) & INT_MAX;
+		struct hlist_head *head = &posix_timers_hashtable[hash(sig, id)];
 
-		head = &posix_timers_hashtable[hash(sig, id)];
+		spin_lock(&hash_lock);
 		if (!posix_timer_hashed(head, sig, id)) {
 			/*
 			 * Set the timer ID and the signal pointer to make