 include/linux/rseq_types.h |  2
 kernel/sched/core.c        | 22
 2 files changed, 24 insertions, 0 deletions
diff --git a/include/linux/rseq_types.h b/include/linux/rseq_types.h
index 0fab369999b6..574aba6fe97c 100644
--- a/include/linux/rseq_types.h
+++ b/include/linux/rseq_types.h
@@ -125,6 +125,7 @@ struct mm_cid_pcpu {
  *		do not actually share the MM.
  * @lock:	Spinlock to protect all fields except @pcpu. It also protects
  *		the MM cid cpumask and the MM cidmask bitmap.
+ * @mutex:	Mutex to serialize forks and exits related to this mm
  */
 struct mm_mm_cid {
 	struct mm_cid_pcpu	__percpu *pcpu;
@@ -132,6 +133,7 @@ struct mm_mm_cid {
 	unsigned int		nr_cpus_allowed;
 	unsigned int		users;
 	raw_spinlock_t		lock;
+	struct mutex		mutex;
 } ____cacheline_aligned_in_smp;
 #else /* CONFIG_SCHED_MM_CID */
 struct mm_mm_cid { };
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index f9295c42da22..01903cf03ab2 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -10370,6 +10370,25 @@ void call_trace_sched_update_nr_running(struct rq *rq, int count)
 #ifdef CONFIG_SCHED_MM_CID
 
 /*
+ * Concurrency IDentifier management
+ *
+ * Serialization rules:
+ *
+ * mm::mm_cid::mutex:	Serializes fork() and exit() and therefore
+ *			protects mm::mm_cid::users.
+ *
+ * mm::mm_cid::lock:	Serializes mm_update_max_cids() and
+ *			mm_update_cpus_allowed(). Nests in mm_cid::mutex
+ *			and runqueue lock.
+ *
+ * The mm_cidmask bitmap is not protected by any of the mm::mm_cid locks
+ * and can only be modified with atomic operations.
+ *
+ * The mm::mm_cid:pcpu per CPU storage is protected by the CPUs runqueue
+ * lock.
+ */
+
+/*
  * Update the CID range properties when the constraints change. Invoked via
  * fork(), exit() and affinity changes
  */
@@ -10412,6 +10431,7 @@ void sched_mm_cid_fork(struct task_struct *t)
 
 	WARN_ON_ONCE(!mm || t->mm_cid.cid != MM_CID_UNSET);
 
+	guard(mutex)(&mm->mm_cid.mutex);
 	guard(raw_spinlock)(&mm->mm_cid.lock);
 	t->mm_cid.active = 1;
 	mm->mm_cid.users++;
@@ -10431,6 +10451,7 @@ void sched_mm_cid_exit(struct task_struct *t)
 	if (!mm || !t->mm_cid.active)
 		return;
 
+	guard(mutex)(&mm->mm_cid.mutex);
 	guard(raw_spinlock)(&mm->mm_cid.lock);
 	t->mm_cid.active = 0;
 	mm->mm_cid.users--;
@@ -10467,6 +10488,7 @@ void mm_init_cid(struct mm_struct *mm, struct task_struct *p)
 	mm->mm_cid.nr_cpus_allowed = p->nr_cpus_allowed;
 	mm->mm_cid.users = 0;
 	raw_spin_lock_init(&mm->mm_cid.lock);
+	mutex_init(&mm->mm_cid.mutex);
 	cpumask_copy(mm_cpus_allowed(mm), &p->cpus_mask);
 	bitmap_zero(mm_cidmask(mm), num_possible_cpus());
 }
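
The guard() annotations added to sched_mm_cid_fork() and sched_mm_cid_exit() are the scope-based lock guards from <linux/cleanup.h>: the lock is acquired where the guard is declared and released automatically when the enclosing scope ends, so no explicit unlock is needed on any return path. The following is only a minimal sketch of the resulting nesting order under the struct layout introduced by this patch; the update_users() helper is hypothetical and merely stands in for the fork()/exit() bookkeeping shown in the hunks above.

#include <linux/cleanup.h>
#include <linux/mm_types.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>

/*
 * Hypothetical helper, not part of the patch: illustrates the lock
 * nesting documented in the new comment block for the fork()/exit()
 * paths.
 */
static void update_users(struct mm_struct *mm, int delta)
{
	/*
	 * Outer lock: mm::mm_cid::mutex serializes concurrent forks and
	 * exits against each other and thereby protects mm::mm_cid::users.
	 */
	guard(mutex)(&mm->mm_cid.mutex);

	/*
	 * Inner lock: mm::mm_cid::lock nests inside the mutex and protects
	 * the CID range update done by mm_update_max_cids().
	 */
	guard(raw_spinlock)(&mm->mm_cid.lock);

	mm->mm_cid.users += delta;

	/* Both guards drop their locks automatically at the end of scope. */
}

The ordering matches the documented rules: the mutex is outermost and only taken in sleepable fork()/exit() context, while mm::mm_cid::lock stays innermost because it is also taken under the runqueue lock on the affinity-change path.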
