-rw-r--r--  include/linux/sched.h   |  1 +
-rw-r--r--  include/linux/signal.h  |  1 +
-rw-r--r--  kernel/exit.c           |  1 +
-rw-r--r--  kernel/fork.c           |  1 +
-rw-r--r--  kernel/signal.c         | 44 ++++++++++++++++++++++++++++++++++++++++++++--
5 files changed, 46 insertions(+), 2 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 05572e2140ad..f5ca798acb3a 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -984,6 +984,7 @@ struct task_struct {
/* Signal handlers: */
struct signal_struct *signal;
struct sighand_struct __rcu *sighand;
+ struct sigqueue *sigqueue_cache;
sigset_t blocked;
sigset_t real_blocked;
/* Restored if set_restore_sigmask() was used: */
diff --git a/include/linux/signal.h b/include/linux/signal.h
index 205526c4003a..c3cbea266136 100644
--- a/include/linux/signal.h
+++ b/include/linux/signal.h
@@ -265,6 +265,7 @@ static inline void init_sigpending(struct sigpending *sig)
}
extern void flush_sigqueue(struct sigpending *queue);
+extern void exit_task_sigqueue_cache(struct task_struct *tsk);
/* Test if 'sig' is valid signal. Use this instead of testing _NSIG directly */
static inline int valid_signal(unsigned long sig)
diff --git a/kernel/exit.c b/kernel/exit.c
index 04029e35e69a..0596526ed9ea 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -162,6 +162,7 @@ static void __exit_signal(struct task_struct *tsk)
flush_sigqueue(&sig->shared_pending);
tty_kref_put(tty);
}
+ exit_task_sigqueue_cache(tsk);
}
static void delayed_put_task_struct(struct rcu_head *rhp)
diff --git a/kernel/fork.c b/kernel/fork.c
index d3171e8e88e5..3c43a9f3e75c 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1995,6 +1995,7 @@ static __latent_entropy struct task_struct *copy_process(
spin_lock_init(&p->alloc_lock);
init_sigpending(&p->pending);
+ p->sigqueue_cache = NULL;
p->utime = p->stime = p->gtime = 0;
#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
diff --git a/kernel/signal.c b/kernel/signal.c
index 568a2e2fc9ab..2d9463e05ae6 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -433,7 +433,16 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t gfp_flags,
rcu_read_unlock();
if (override_rlimit || likely(sigpending <= task_rlimit(t, RLIMIT_SIGPENDING))) {
- q = kmem_cache_alloc(sigqueue_cachep, gfp_flags);
+ /*
+ * Preallocation does not hold sighand::siglock so it can't
+ * use the cache. The lockless caching requires that only
+ * one consumer and only one producer run at a time.
+ */
+ q = READ_ONCE(t->sigqueue_cache);
+ if (!q || sigqueue_flags)
+ q = kmem_cache_alloc(sigqueue_cachep, gfp_flags);
+ else
+ WRITE_ONCE(t->sigqueue_cache, NULL);
} else {
print_dropped_signal(sig);
}
@@ -450,13 +459,44 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t gfp_flags,
return q;
}
+void exit_task_sigqueue_cache(struct task_struct *tsk)
+{
+ /* Race free because @tsk is mopped up */
+ struct sigqueue *q = tsk->sigqueue_cache;
+
+ if (q) {
+ tsk->sigqueue_cache = NULL;
+ /*
+ * Hand it back to the cache as the task might
+ * be self reaping which would leak the object.
+ */
+ kmem_cache_free(sigqueue_cachep, q);
+ }
+}
+
+static void sigqueue_cache_or_free(struct sigqueue *q)
+{
+ /*
+ * Cache one sigqueue per task. This pairs with the consumer side
+ * in __sigqueue_alloc() and needs READ/WRITE_ONCE() to prevent the
+ * compiler from store tearing and to tell KCSAN that the data race
+ * is intentional when run without holding current->sighand->siglock,
+ * which is fine as current obviously cannot run __sigqueue_free()
+ * concurrently.
+ */
+ if (!READ_ONCE(current->sigqueue_cache))
+ WRITE_ONCE(current->sigqueue_cache, q);
+ else
+ kmem_cache_free(sigqueue_cachep, q);
+}
+
static void __sigqueue_free(struct sigqueue *q)
{
if (q->flags & SIGQUEUE_PREALLOC)
return;
if (atomic_dec_and_test(&q->user->sigpending))
free_uid(q->user);
- kmem_cache_free(sigqueue_cachep, q);
+ sigqueue_cache_or_free(q);
}
void flush_sigqueue(struct sigpending *queue)
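
Editor's note: the pattern this patch implements can be read in isolation. Below is a
minimal userspace C sketch of the same one-slot cache, NOT kernel code: the names
(node, node_cache, node_alloc, node_free, node_cache_exit) are illustrative, and C11
relaxed atomics stand in for the kernel's READ_ONCE()/WRITE_ONCE(). It mirrors the
patch's rules: at most one producer (the task freeing a node into its own slot) and
one consumer (the signal sender allocating from that slot) run at a time, with mutual
exclusion provided externally, as sighand::siglock and task lifetime provide it in
the kernel.

/* Minimal sketch of the one-slot cache pattern; assumes external
 * serialization of producer and consumer, as in the patch above. */
#include <stdatomic.h>
#include <stdlib.h>

struct node { int payload; };

/* One cache slot; stands in for task_struct::sigqueue_cache. */
static _Atomic(struct node *) node_cache;

/* Consumer side, cf. __sigqueue_alloc(): take the cached node if
 * present, otherwise fall back to the real allocator. */
static struct node *node_alloc(void)
{
	struct node *n = atomic_load_explicit(&node_cache,
					      memory_order_relaxed);
	if (!n)
		return malloc(sizeof(*n));
	atomic_store_explicit(&node_cache, NULL, memory_order_relaxed);
	return n;
}

/* Producer side, cf. sigqueue_cache_or_free(): park the node in the
 * slot if it is empty, otherwise hand it back to the allocator. */
static void node_free(struct node *n)
{
	if (!atomic_load_explicit(&node_cache, memory_order_relaxed))
		atomic_store_explicit(&node_cache, n, memory_order_relaxed);
	else
		free(n);
}

/* Teardown, cf. exit_task_sigqueue_cache(): drain the slot so a
 * cached node is not leaked when the "task" goes away. */
static void node_cache_exit(void)
{
	struct node *n = atomic_load_explicit(&node_cache,
					      memory_order_relaxed);
	if (n) {
		atomic_store_explicit(&node_cache, NULL,
				      memory_order_relaxed);
		free(n);
	}
}

int main(void)
{
	struct node *a = node_alloc();	/* slot empty: malloc()     */
	node_free(a);			/* slot empty: node cached  */
	struct node *b = node_alloc();	/* reuses a, no allocation  */
	node_free(b);			/* cached again             */
	node_cache_exit();		/* slot drained, no leak    */
	return 0;
}

The relaxed atomics only prevent load/store tearing and mark the race as intentional
(the role READ_ONCE()/WRITE_ONCE() play for KCSAN in the patch); they impose no
ordering, which is sufficient precisely because the single-producer/single-consumer
discipline rules out concurrent access to the slot.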