author		Mathieu Desnoyers <mathieu.desnoyers@efficios.com>	2022-11-22 15:39:10 -0500
committer	Peter Zijlstra <peterz@infradead.org>	2022-12-27 12:52:12 +0100
commit		f7b01bb0b57f994a44ea6368536b59062b796381 (patch)
tree		b5e315e3661c3c74156b4136d6133dae3b741e8d /kernel/rseq.c
parent		af7f588d8f7355bc4298dd1962d7826358fc95f0 (diff)
rseq: Extend struct rseq with per-memory-map concurrency ID
If a memory map has fewer threads than there are cores on the system, or is limited to run on few cores concurrently through sched affinity or cgroup cpusets, the concurrency IDs will be values close to 0, thus allowing efficient use of user-space memory for per-cpu data structures.

Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/r/20221122203932.231377-9-mathieu.desnoyers@efficios.com
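For illustration only (not part of this commit): a minimal user-space sketch of how a program could use the new mm_cid field as an index into a small array sized by concurrency rather than by the number of possible CPUs. It assumes a glibc that registers rseq and exposes __rseq_offset via <sys/rseq.h>, a toolchain providing __builtin_thread_pointer(), and headers whose struct rseq already contains the mm_cid member; MAX_CONCURRENCY and per_cid_counter are made-up names. Whether the running kernel actually maintains mm_cid should be checked via the rseq feature size (see the detection sketch after the diff).

/*
 * Hedged user-space sketch: bump a counter slot chosen by mm_cid.
 * per_cid_counter and MAX_CONCURRENCY are illustrative names only.
 */
#include <stdatomic.h>
#include <stdio.h>
#include <sys/rseq.h>		/* struct rseq, __rseq_offset (glibc >= 2.35) */

#define MAX_CONCURRENCY	256	/* illustrative bound on concurrent threads */

static _Atomic unsigned long per_cid_counter[MAX_CONCURRENCY];

static struct rseq *rseq_area(void)
{
	/* glibc keeps the registered rseq area at __rseq_offset from the
	 * thread pointer. */
	return (struct rseq *)((char *)__builtin_thread_pointer() +
			       __rseq_offset);
}

int main(void)
{
	/* mm_cid stays close to 0 when few threads run concurrently, so a
	 * small array sized by concurrency (not by possible CPUs) suffices. */
	unsigned int cid = rseq_area()->mm_cid;

	if (cid < MAX_CONCURRENCY)
		atomic_fetch_add_explicit(&per_cid_counter[cid], 1,
					  memory_order_relaxed);
	printf("running with mm_cid=%u\n", cid);
	return 0;
}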
Diffstat (limited to 'kernel/rseq.c')
-rw-r--r--	kernel/rseq.c	11
1 file changed, 10 insertions(+), 1 deletion(-)
diff --git a/kernel/rseq.c b/kernel/rseq.c
index 193cfcc94ddc..9de6e35fe679 100644
--- a/kernel/rseq.c
+++ b/kernel/rseq.c
@@ -90,12 +90,15 @@ static int rseq_update_cpu_node_id(struct task_struct *t)
 	struct rseq __user *rseq = t->rseq;
 	u32 cpu_id = raw_smp_processor_id();
 	u32 node_id = cpu_to_node(cpu_id);
+	u32 mm_cid = task_mm_cid(t);
 
+	WARN_ON_ONCE((int) mm_cid < 0);
 	if (!user_write_access_begin(rseq, t->rseq_len))
 		goto efault;
 	unsafe_put_user(cpu_id, &rseq->cpu_id_start, efault_end);
 	unsafe_put_user(cpu_id, &rseq->cpu_id, efault_end);
 	unsafe_put_user(node_id, &rseq->node_id, efault_end);
+	unsafe_put_user(mm_cid, &rseq->mm_cid, efault_end);
 	/*
 	 * Additional feature fields added after ORIG_RSEQ_SIZE
 	 * need to be conditionally updated only if
@@ -113,7 +116,8 @@ efault:
 
 static int rseq_reset_rseq_cpu_node_id(struct task_struct *t)
 {
-	u32 cpu_id_start = 0, cpu_id = RSEQ_CPU_ID_UNINITIALIZED, node_id = 0;
+	u32 cpu_id_start = 0, cpu_id = RSEQ_CPU_ID_UNINITIALIZED, node_id = 0,
+	    mm_cid = 0;
 
 	/*
 	 * Reset cpu_id_start to its initial state (0).
@@ -133,6 +137,11 @@ static int rseq_reset_rseq_cpu_node_id(struct task_struct *t)
 	if (put_user(node_id, &t->rseq->node_id))
 		return -EFAULT;
 	/*
+	 * Reset mm_cid to its initial state (0).
+	 */
+	if (put_user(mm_cid, &t->rseq->mm_cid))
+		return -EFAULT;
+	/*
 	 * Additional feature fields added after ORIG_RSEQ_SIZE
 	 * need to be conditionally reset only if
 	 * t->rseq_len != ORIG_RSEQ_SIZE.
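The comments about ORIG_RSEQ_SIZE above reflect that mm_cid lives past the original 32-byte rseq ABI, so user space has to probe for it before relying on it. A hedged detection sketch, assuming a kernel and headers (outside this patch) that provide AT_RSEQ_FEATURE_SIZE and a struct rseq containing mm_cid:

/*
 * Hedged sketch: check the rseq feature size exported through the ELF
 * auxiliary vector before trusting the mm_cid field.
 */
#include <linux/auxvec.h>	/* AT_RSEQ_FEATURE_SIZE (kernel >= 6.3 headers) */
#include <linux/rseq.h>		/* struct rseq with mm_cid */
#include <stddef.h>
#include <stdio.h>
#include <sys/auxv.h>

int main(void)
{
	unsigned long feat = getauxval(AT_RSEQ_FEATURE_SIZE);

	/* The kernel only maintains mm_cid when the reported feature size
	 * covers the field's offset within struct rseq. */
	if (feat >= offsetof(struct rseq, mm_cid) + sizeof(__u32))
		printf("mm_cid available (rseq feature size %lu)\n", feat);
	else
		printf("mm_cid not available (rseq feature size %lu)\n", feat);
	return 0;
}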