| | | |
|---|---|---|
| author | JP Kobryn <inwardvessel@gmail.com> | 2025-05-14 17:19:35 -0700 |
| committer | Tejun Heo <tj@kernel.org> | 2025-05-19 10:29:42 -1000 |
| commit | 748922dcfabdd655d25fb6dd09a60e694a3d35e6 (patch) | |
| tree | bac6da9c031fab4a6d40efa2a4feb0494cda38d6 /kernel | |
| parent | 5da3bfa029d6809e192d112f39fca4dbe0137aaf (diff) | |
cgroup: use subsystem-specific rstat locks to avoid contention
It is possible to eliminate contention between subsystems when
updating/flushing stats by using subsystem-specific locks. Let the existing
rstat locks be dedicated to the cgroup base stats and rename them to
reflect that. Add similar locks to the cgroup_subsys struct for use with
individual subsystems.
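The new per-subsystem locks live on struct cgroup_subsys. That header change (include/linux/cgroup-defs.h) falls outside this kernel/-limited diffstat, so the sketch below is inferred from the accessors used in the rstat.c hunk further down (ss->rstat_ss_lock and ss->rstat_ss_cpu_lock) rather than copied from the actual patch:

```c
/*
 * Inferred sketch of the fields added to struct cgroup_subsys; the exact
 * declaration lives in include/linux/cgroup-defs.h, which is not part of
 * this diffstat. Field names and types follow the ss->rstat_ss_lock and
 * ss->rstat_ss_cpu_lock uses in the rstat.c hunk below.
 */
struct cgroup_subsys {
	/* ... existing members ... */
	spinlock_t rstat_ss_lock;			/* flush-side lock */
	raw_spinlock_t __percpu *rstat_ss_cpu_lock;	/* update-side per-CPU locks */
};
```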
Lock initialization is done in the new function ss_rstat_init(ss), which
replaces cgroup_rstat_boot(void). If NULL is passed to this function, the
global base stat locks will be initialized. Otherwise, the subsystem locks
will be initialized.
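Condensed from the cgroup.c hunks below: the base stat locks are set up once from cgroup_init() by passing NULL, while each subsystem's locks are set up from cgroup_init_subsys(). Because ss_rstat_init() can now fail (the per-CPU lock allocation may return -ENOMEM), both boot-time call sites wrap it in BUG_ON():

```c
/* Boot-time call sites, condensed from the cgroup.c hunks below. */
BUG_ON(ss_rstat_init(NULL));	/* cgroup_init(): global base stat locks */
BUG_ON(ss_rstat_init(ss));	/* cgroup_init_subsys(): per-subsystem locks */
```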
Change the existing lock helper functions to accept a reference to a css.
Then within these functions, conditionally select the appropriate locks
based on the subsystem affiliation of the given css. Add helper functions
for this selection routine to avoid repeated code.
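The selection helpers are part of the rstat.c hunk below and are reproduced here for clarity: a NULL subsystem (css->ss == NULL, i.e. the css carrying the cgroup base stats) selects the renamed base locks, otherwise the subsystem's own locks are used.

```c
/* From the rstat.c hunk below: pick the subsystem's locks when @ss is set,
 * otherwise fall back to the static base-stat locks.
 */
static spinlock_t *ss_rstat_lock(struct cgroup_subsys *ss)
{
	if (ss)
		return &ss->rstat_ss_lock;

	return &rstat_base_lock;
}

static raw_spinlock_t *ss_rstat_cpu_lock(struct cgroup_subsys *ss, int cpu)
{
	if (ss)
		return per_cpu_ptr(ss->rstat_ss_cpu_lock, cpu);

	return per_cpu_ptr(&rstat_base_cpu_lock, cpu);
}
```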
Signed-off-by: JP Kobryn <inwardvessel@gmail.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
Diffstat (limited to 'kernel')
| | | |
|---|---|---|
| -rw-r--r-- | kernel/cgroup/cgroup-internal.h | 2 |
| -rw-r--r-- | kernel/cgroup/cgroup.c | 3 |
| -rw-r--r-- | kernel/cgroup/rstat.c | 98 |
3 files changed, 71 insertions, 32 deletions
```diff
diff --git a/kernel/cgroup/cgroup-internal.h b/kernel/cgroup/cgroup-internal.h
index c161d34be634..b14e61c64a34 100644
--- a/kernel/cgroup/cgroup-internal.h
+++ b/kernel/cgroup/cgroup-internal.h
@@ -272,7 +272,7 @@ int cgroup_task_count(const struct cgroup *cgrp);
  */
 int css_rstat_init(struct cgroup_subsys_state *css);
 void css_rstat_exit(struct cgroup_subsys_state *css);
-void cgroup_rstat_boot(void);
+int ss_rstat_init(struct cgroup_subsys *ss);
 void cgroup_base_stat_cputime_show(struct seq_file *seq);
 
 /*
diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
index 45097dc9e099..44baa0318713 100644
--- a/kernel/cgroup/cgroup.c
+++ b/kernel/cgroup/cgroup.c
@@ -6088,6 +6088,7 @@ static void __init cgroup_init_subsys(struct cgroup_subsys *ss, bool early)
 		css->id = cgroup_idr_alloc(&ss->css_idr, css, 1, 2, GFP_KERNEL);
 		BUG_ON(css->id < 0);
 
+		BUG_ON(ss_rstat_init(ss));
 		BUG_ON(css_rstat_init(css));
 	}
 
@@ -6167,7 +6168,7 @@ int __init cgroup_init(void)
 	BUG_ON(cgroup_init_cftypes(NULL, cgroup_psi_files));
 	BUG_ON(cgroup_init_cftypes(NULL, cgroup1_base_files));
 
-	cgroup_rstat_boot();
+	BUG_ON(ss_rstat_init(NULL));
 
 	get_user_ns(init_cgroup_ns.user_ns);
 
diff --git a/kernel/cgroup/rstat.c b/kernel/cgroup/rstat.c
index 6ce134a7294d..0bb609e73bde 100644
--- a/kernel/cgroup/rstat.c
+++ b/kernel/cgroup/rstat.c
@@ -9,8 +9,8 @@
 
 #include <trace/events/cgroup.h>
 
-static DEFINE_SPINLOCK(cgroup_rstat_lock);
-static DEFINE_PER_CPU(raw_spinlock_t, cgroup_rstat_cpu_lock);
+static DEFINE_SPINLOCK(rstat_base_lock);
+static DEFINE_PER_CPU(raw_spinlock_t, rstat_base_cpu_lock);
 
 static void cgroup_base_stat_flush(struct cgroup *cgrp, int cpu);
 
@@ -26,8 +26,24 @@ static struct cgroup_rstat_base_cpu *cgroup_rstat_base_cpu(
 	return per_cpu_ptr(cgrp->rstat_base_cpu, cpu);
 }
 
+static spinlock_t *ss_rstat_lock(struct cgroup_subsys *ss)
+{
+	if (ss)
+		return &ss->rstat_ss_lock;
+
+	return &rstat_base_lock;
+}
+
+static raw_spinlock_t *ss_rstat_cpu_lock(struct cgroup_subsys *ss, int cpu)
+{
+	if (ss)
+		return per_cpu_ptr(ss->rstat_ss_cpu_lock, cpu);
+
+	return per_cpu_ptr(&rstat_base_cpu_lock, cpu);
+}
+
 /*
- * Helper functions for rstat per CPU lock (cgroup_rstat_cpu_lock).
+ * Helper functions for rstat per CPU locks.
  *
  * This makes it easier to diagnose locking issues and contention in
  * production environments. The parameter @fast_path determine the
@@ -35,21 +51,23 @@ static struct cgroup_rstat_base_cpu *cgroup_rstat_base_cpu(
  * operations without handling high-frequency fast-path "update" events.
  */
 static __always_inline
-unsigned long _css_rstat_cpu_lock(raw_spinlock_t *cpu_lock, int cpu,
-				     struct cgroup_subsys_state *css, const bool fast_path)
+unsigned long _css_rstat_cpu_lock(struct cgroup_subsys_state *css, int cpu,
+		const bool fast_path)
 {
 	struct cgroup *cgrp = css->cgroup;
+	raw_spinlock_t *cpu_lock;
 	unsigned long flags;
 	bool contended;
 
 	/*
-	 * The _irqsave() is needed because cgroup_rstat_lock is
-	 * spinlock_t which is a sleeping lock on PREEMPT_RT. Acquiring
-	 * this lock with the _irq() suffix only disables interrupts on
-	 * a non-PREEMPT_RT kernel. The raw_spinlock_t below disables
-	 * interrupts on both configurations. The _irqsave() ensures
-	 * that interrupts are always disabled and later restored.
+	 * The _irqsave() is needed because the locks used for flushing are
+	 * spinlock_t which is a sleeping lock on PREEMPT_RT. Acquiring this lock
+	 * with the _irq() suffix only disables interrupts on a non-PREEMPT_RT
+	 * kernel. The raw_spinlock_t below disables interrupts on both
+	 * configurations. The _irqsave() ensures that interrupts are always
+	 * disabled and later restored.
 	 */
+	cpu_lock = ss_rstat_cpu_lock(css->ss, cpu);
 	contended = !raw_spin_trylock_irqsave(cpu_lock, flags);
 	if (contended) {
 		if (fast_path)
@@ -69,17 +87,18 @@ unsigned long _css_rstat_cpu_lock(raw_spinlock_t *cpu_lock, int cpu,
 }
 
 static __always_inline
-void _css_rstat_cpu_unlock(raw_spinlock_t *cpu_lock, int cpu,
-			      struct cgroup_subsys_state *css, unsigned long flags,
-			      const bool fast_path)
+void _css_rstat_cpu_unlock(struct cgroup_subsys_state *css, int cpu,
+		unsigned long flags, const bool fast_path)
 {
 	struct cgroup *cgrp = css->cgroup;
+	raw_spinlock_t *cpu_lock;
 
 	if (fast_path)
 		trace_cgroup_rstat_cpu_unlock_fastpath(cgrp, cpu, false);
 	else
 		trace_cgroup_rstat_cpu_unlock(cgrp, cpu, false);
 
+	cpu_lock = ss_rstat_cpu_lock(css->ss, cpu);
 	raw_spin_unlock_irqrestore(cpu_lock, flags);
 }
 
@@ -94,7 +113,6 @@ void _css_rstat_cpu_unlock(raw_spinlock_t *cpu_lock, int cpu,
  */
 __bpf_kfunc void css_rstat_updated(struct cgroup_subsys_state *css, int cpu)
 {
-	raw_spinlock_t *cpu_lock = per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu);
 	unsigned long flags;
 
 	/*
@@ -115,7 +133,7 @@ __bpf_kfunc void css_rstat_updated(struct cgroup_subsys_state *css, int cpu)
 	if (data_race(css_rstat_cpu(css, cpu)->updated_next))
 		return;
 
-	flags = _css_rstat_cpu_lock(cpu_lock, cpu, css, true);
+	flags = _css_rstat_cpu_lock(css, cpu, true);
 
 	/* put @css and all ancestors on the corresponding updated lists */
 	while (true) {
@@ -143,7 +161,7 @@ __bpf_kfunc void css_rstat_updated(struct cgroup_subsys_state *css, int cpu)
 		css = parent;
 	}
 
-	_css_rstat_cpu_unlock(cpu_lock, cpu, css, flags, true);
+	_css_rstat_cpu_unlock(css, cpu, flags, true);
 }
 
 /**
@@ -171,11 +189,11 @@ static struct cgroup_subsys_state *css_rstat_push_children(
 	child->rstat_flush_next = NULL;
 
 	/*
-	 * The cgroup_rstat_lock must be held for the whole duration from
+	 * The subsystem rstat lock must be held for the whole duration from
 	 * here as the rstat_flush_next list is being constructed to when
 	 * it is consumed later in css_rstat_flush().
 	 */
-	lockdep_assert_held(&cgroup_rstat_lock);
+	lockdep_assert_held(ss_rstat_lock(head->ss));
 
 	/*
 	 * Notation: -> updated_next pointer
@@ -245,12 +263,11 @@ next_level:
 static struct cgroup_subsys_state *css_rstat_updated_list(
 		struct cgroup_subsys_state *root, int cpu)
 {
-	raw_spinlock_t *cpu_lock = per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu);
 	struct css_rstat_cpu *rstatc = css_rstat_cpu(root, cpu);
 	struct cgroup_subsys_state *head = NULL, *parent, *child;
 	unsigned long flags;
 
-	flags = _css_rstat_cpu_lock(cpu_lock, cpu, root, false);
+	flags = _css_rstat_cpu_lock(root, cpu, false);
 
 	/* Return NULL if this subtree is not on-list */
 	if (!rstatc->updated_next)
@@ -287,7 +304,7 @@ static struct cgroup_subsys_state *css_rstat_updated_list(
 	if (child != root)
 		head = css_rstat_push_children(head, child, cpu);
 unlock_ret:
-	_css_rstat_cpu_unlock(cpu_lock, cpu, root, flags, false);
+	_css_rstat_cpu_unlock(root, cpu, flags, false);
 
 	return head;
 }
@@ -314,7 +331,7 @@ __weak noinline void bpf_rstat_flush(struct cgroup *cgrp,
 __bpf_hook_end();
 
 /*
- * Helper functions for locking cgroup_rstat_lock.
+ * Helper functions for locking.
  *
  * This makes it easier to diagnose locking issues and contention in
  * production environments. The parameter @cpu_in_loop indicate lock
@@ -324,27 +341,31 @@ __bpf_hook_end();
  */
 static inline void __css_rstat_lock(struct cgroup_subsys_state *css,
 		int cpu_in_loop)
-	__acquires(&cgroup_rstat_lock)
+	__acquires(ss_rstat_lock(css->ss))
 {
 	struct cgroup *cgrp = css->cgroup;
+	spinlock_t *lock;
 	bool contended;
 
-	contended = !spin_trylock_irq(&cgroup_rstat_lock);
+	lock = ss_rstat_lock(css->ss);
+	contended = !spin_trylock_irq(lock);
 	if (contended) {
 		trace_cgroup_rstat_lock_contended(cgrp, cpu_in_loop, contended);
-		spin_lock_irq(&cgroup_rstat_lock);
+		spin_lock_irq(lock);
 	}
 	trace_cgroup_rstat_locked(cgrp, cpu_in_loop, contended);
 }
 
 static inline void __css_rstat_unlock(struct cgroup_subsys_state *css,
 				      int cpu_in_loop)
-	__releases(&cgroup_rstat_lock)
+	__releases(ss_rstat_lock(css->ss))
 {
 	struct cgroup *cgrp = css->cgroup;
+	spinlock_t *lock;
 
+	lock = ss_rstat_lock(css->ss);
 	trace_cgroup_rstat_unlock(cgrp, cpu_in_loop, false);
-	spin_unlock_irq(&cgroup_rstat_lock);
+	spin_unlock_irq(lock);
 }
 
 /**
@@ -466,12 +487,29 @@ void css_rstat_exit(struct cgroup_subsys_state *css)
 	css->rstat_cpu = NULL;
 }
 
-void __init cgroup_rstat_boot(void)
+/**
+ * ss_rstat_init - subsystem-specific rstat initialization
+ * @ss: target subsystem
+ *
+ * If @ss is NULL, the static locks associated with the base stats
+ * are initialized. If @ss is non-NULL, the subsystem-specific locks
+ * are initialized.
+ */
+int __init ss_rstat_init(struct cgroup_subsys *ss)
 {
 	int cpu;
 
+	if (ss) {
+		ss->rstat_ss_cpu_lock = alloc_percpu(raw_spinlock_t);
+		if (!ss->rstat_ss_cpu_lock)
+			return -ENOMEM;
+	}
+
+	spin_lock_init(ss_rstat_lock(ss));
 	for_each_possible_cpu(cpu)
-		raw_spin_lock_init(per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu));
+		raw_spin_lock_init(ss_rstat_cpu_lock(ss, cpu));
+
+	return 0;
 }
 
 /*
```
