diff options
| author | Thomas Gleixner <tglx@linutronix.de> | 2025-11-19 18:27:07 +0100 |
|---|---|---|
| committer | Thomas Gleixner <tglx@linutronix.de> | 2025-11-25 19:45:40 +0100 |
| commit | bf070520e398679cd582b3c3e44107bf22c143ba (patch) | |
| tree | 196e8b5c1551ba328dedef0dd24037d228a5b07d | |
| parent | 2b1642b881088bbf73fcb1147c474a198ec46729 (diff) | |
sched/mmcid: Move initialization out of line
It's getting bigger soon, so just move it out of line to the rest of the
code.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Link: https://patch.msgid.link/20251119172549.769636491@linutronix.de
| -rw-r--r-- | include/linux/mm_types.h | 15 |
| -rw-r--r-- | kernel/sched/core.c | 14 |
2 files changed, 15 insertions, 14 deletions
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index bafb81b33922..3b7d05e7169c 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -1351,20 +1351,7 @@ static inline unsigned long *mm_cidmask(struct mm_struct *mm) return (unsigned long *)cid_bitmap; } -static inline void mm_init_cid(struct mm_struct *mm, struct task_struct *p) -{ - int i; - - for_each_possible_cpu(i) { - struct mm_cid_pcpu *pcpu = per_cpu_ptr(mm->mm_cid.pcpu, i); - - pcpu->cid = MM_CID_UNSET; - } - mm->mm_cid.nr_cpus_allowed = p->nr_cpus_allowed; - raw_spin_lock_init(&mm->mm_cid.lock); - cpumask_copy(mm_cpus_allowed(mm), &p->cpus_mask); - bitmap_zero(mm_cidmask(mm), num_possible_cpus()); -} +void mm_init_cid(struct mm_struct *mm, struct task_struct *p); static inline int mm_alloc_cid_noprof(struct mm_struct *mm, struct task_struct *p) { diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 3fdf90a7074d..34b6c31eca3a 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -10431,6 +10431,20 @@ void sched_mm_cid_fork(struct task_struct *t) WARN_ON_ONCE(!t->mm || t->mm_cid.cid != MM_CID_UNSET); t->mm_cid.active = 1; } + +void mm_init_cid(struct mm_struct *mm, struct task_struct *p) +{ + struct mm_cid_pcpu __percpu *pcpu = mm->mm_cid.pcpu; + int cpu; + + for_each_possible_cpu(cpu) + per_cpu_ptr(pcpu, cpu)->cid = MM_CID_UNSET; + + mm->mm_cid.nr_cpus_allowed = p->nr_cpus_allowed; + raw_spin_lock_init(&mm->mm_cid.lock); + cpumask_copy(mm_cpus_allowed(mm), &p->cpus_mask); + bitmap_zero(mm_cidmask(mm), num_possible_cpus()); +} #else /* CONFIG_SCHED_MM_CID */ static inline void mm_update_cpus_allowed(struct mm_struct *mm, const struct cpumask *affmsk) { } #endif /* !CONFIG_SCHED_MM_CID */ |
