author    Tejun Heo <tj@kernel.org>    2018-04-26 14:29:04 -0700
committer Tejun Heo <tj@kernel.org>    2018-04-26 14:29:04 -0700
commit    d4ff749b5e0f1e2d4d69a3e4ea81cdeaeb4904d2
tree      99a922407a9aa00dbd7441e26969e18120a027e5 /kernel/cgroup
parent    c58632b3631cb222da41d9dc0dd39e106c1eafd0
cgroup: Distinguish base resource stat implementation from rstat
Base resource stat accounts universal (not specific to any controller) resource consumption on top of rstat. Currently, its implementation is intermixed with the rstat implementation, making the code confusing to follow.

This patch clarifies the distinction by doing the following:

* Encapsulate base resource stat counters, currently only cputime, in struct cgroup_base_stat.
* Move prev_cputime into struct cgroup and initialize it with cgroup.
* Rename the related functions so that they start with cgroup_base_stat.
* Prefix the related variables and field names with b.

This patch doesn't make any functional changes.

Signed-off-by: Tejun Heo <tj@kernel.org>
Diffstat (limited to 'kernel/cgroup')
-rw-r--r--  kernel/cgroup/cgroup-internal.h |  2
-rw-r--r--  kernel/cgroup/cgroup.c          |  4
-rw-r--r--  kernel/cgroup/rstat.c           | 67
3 files changed, 36 insertions, 37 deletions
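For orientation: the struct definitions this patch reshapes live in include/linux/cgroup-defs.h, which is outside the kernel/cgroup diffstat above, so the sketch below is only a reconstruction from the field accesses visible in the hunks (rstatc->bsync, rstatc->bstat, rstatc->last_bstat, cgrp->bstat, cgrp->pending_bstat, cgrp->prev_cputime). Member order and the elided rstat bookkeeping fields are guesses, not part of this patch.

/*
 * Rough layout implied by the hunks below; not the authoritative
 * definitions, which are in include/linux/cgroup-defs.h.
 */
struct cgroup_base_stat {
	struct task_cputime cputime;	/* utime, stime, sum_exec_runtime */
};

struct cgroup_rstat_cpu {
	struct u64_stats_sync bsync;		/* guards bstat updates on this cpu */
	struct cgroup_base_stat bstat;		/* live per-cpu counters */
	struct cgroup_base_stat last_bstat;	/* snapshot taken at last flush */
	/* ... updated-cgroup tree linkage omitted ... */
};

struct cgroup {
	/* ... */
	struct cgroup_base_stat pending_bstat;	/* deltas queued by flushed children */
	struct cgroup_base_stat bstat;		/* flushed, hierarchy-wide totals */
	struct prev_cputime prev_cputime;	/* consumed by cputime_adjust() */
	/* ... */
};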
diff --git a/kernel/cgroup/cgroup-internal.h b/kernel/cgroup/cgroup-internal.h
index 092711114a1f..aab4d0a09670 100644
--- a/kernel/cgroup/cgroup-internal.h
+++ b/kernel/cgroup/cgroup-internal.h
@@ -206,7 +206,7 @@ int cgroup_task_count(const struct cgroup *cgrp);
void cgroup_rstat_flush(struct cgroup *cgrp);
int cgroup_rstat_init(struct cgroup *cgrp);
void cgroup_rstat_exit(struct cgroup *cgrp);
-void cgroup_stat_show_cputime(struct seq_file *seq);
+void cgroup_base_stat_cputime_show(struct seq_file *seq);
void cgroup_rstat_boot(void);
/*
diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
index 32eb7ce0ad71..31af98996692 100644
--- a/kernel/cgroup/cgroup.c
+++ b/kernel/cgroup/cgroup.c
@@ -54,6 +54,7 @@
#include <linux/proc_ns.h>
#include <linux/nsproxy.h>
#include <linux/file.h>
+#include <linux/sched/cputime.h>
#include <net/sock.h>
#define CREATE_TRACE_POINTS
@@ -1859,6 +1860,7 @@ static void init_cgroup_housekeeping(struct cgroup *cgrp)
cgrp->dom_cgrp = cgrp;
cgrp->max_descendants = INT_MAX;
cgrp->max_depth = INT_MAX;
+ prev_cputime_init(&cgrp->prev_cputime);
for_each_subsys(ss, ssid)
INIT_LIST_HEAD(&cgrp->e_csets[ssid]);
@@ -3396,7 +3398,7 @@ static int cpu_stat_show(struct seq_file *seq, void *v)
struct cgroup __maybe_unused *cgrp = seq_css(seq)->cgroup;
int ret = 0;
- cgroup_stat_show_cputime(seq);
+ cgroup_base_stat_cputime_show(seq);
#ifdef CONFIG_CGROUP_SCHED
ret = cgroup_extra_stat_show(seq, cgrp, cpu_cgrp_id);
#endif
diff --git a/kernel/cgroup/rstat.c b/kernel/cgroup/rstat.c
index 6824047b57a9..7670191fa776 100644
--- a/kernel/cgroup/rstat.c
+++ b/kernel/cgroup/rstat.c
@@ -128,30 +128,30 @@ static struct cgroup *cgroup_rstat_cpu_pop_updated(struct cgroup *pos,
return pos;
}
-static void cgroup_stat_accumulate(struct cgroup_stat *dst_stat,
- struct cgroup_stat *src_stat)
+static void cgroup_base_stat_accumulate(struct cgroup_base_stat *dst_bstat,
+ struct cgroup_base_stat *src_bstat)
{
- dst_stat->cputime.utime += src_stat->cputime.utime;
- dst_stat->cputime.stime += src_stat->cputime.stime;
- dst_stat->cputime.sum_exec_runtime += src_stat->cputime.sum_exec_runtime;
+ dst_bstat->cputime.utime += src_bstat->cputime.utime;
+ dst_bstat->cputime.stime += src_bstat->cputime.stime;
+ dst_bstat->cputime.sum_exec_runtime += src_bstat->cputime.sum_exec_runtime;
}
-static void cgroup_cpu_stat_flush_one(struct cgroup *cgrp, int cpu)
+static void cgroup_base_stat_flush(struct cgroup *cgrp, int cpu)
{
struct cgroup *parent = cgroup_parent(cgrp);
struct cgroup_rstat_cpu *rstatc = cgroup_rstat_cpu(cgrp, cpu);
- struct task_cputime *last_cputime = &rstatc->last_cputime;
+ struct task_cputime *last_cputime = &rstatc->last_bstat.cputime;
struct task_cputime cputime;
- struct cgroup_stat delta;
+ struct cgroup_base_stat delta;
unsigned seq;
lockdep_assert_held(&cgroup_rstat_mutex);
/* fetch the current per-cpu values */
do {
- seq = __u64_stats_fetch_begin(&rstatc->sync);
- cputime = rstatc->cputime;
- } while (__u64_stats_fetch_retry(&rstatc->sync, seq));
+ seq = __u64_stats_fetch_begin(&rstatc->bsync);
+ cputime = rstatc->bstat.cputime;
+ } while (__u64_stats_fetch_retry(&rstatc->bsync, seq));
/* accumulate the deltas to propgate */
delta.cputime.utime = cputime.utime - last_cputime->utime;
@@ -161,13 +161,13 @@ static void cgroup_cpu_stat_flush_one(struct cgroup *cgrp, int cpu)
*last_cputime = cputime;
/* transfer the pending stat into delta */
- cgroup_stat_accumulate(&delta, &cgrp->pending_stat);
- memset(&cgrp->pending_stat, 0, sizeof(cgrp->pending_stat));
+ cgroup_base_stat_accumulate(&delta, &cgrp->pending_bstat);
+ memset(&cgrp->pending_bstat, 0, sizeof(cgrp->pending_bstat));
/* propagate delta into the global stat and the parent's pending */
- cgroup_stat_accumulate(&cgrp->stat, &delta);
+ cgroup_base_stat_accumulate(&cgrp->bstat, &delta);
if (parent)
- cgroup_stat_accumulate(&parent->pending_stat, &delta);
+ cgroup_base_stat_accumulate(&parent->pending_bstat, &delta);
}
/* see cgroup_rstat_flush() */
@@ -184,7 +184,7 @@ static void cgroup_rstat_flush_locked(struct cgroup *cgrp)
raw_spin_lock_irq(cpu_lock);
while ((pos = cgroup_rstat_cpu_pop_updated(pos, cgrp, cpu)))
- cgroup_cpu_stat_flush_one(pos, cpu);
+ cgroup_base_stat_flush(pos, cpu);
raw_spin_unlock_irq(cpu_lock);
}
}
@@ -208,19 +208,19 @@ void cgroup_rstat_flush(struct cgroup *cgrp)
}
static struct cgroup_rstat_cpu *
-cgroup_cpu_stat_account_begin(struct cgroup *cgrp)
+cgroup_base_stat_cputime_account_begin(struct cgroup *cgrp)
{
struct cgroup_rstat_cpu *rstatc;
rstatc = get_cpu_ptr(cgrp->rstat_cpu);
- u64_stats_update_begin(&rstatc->sync);
+ u64_stats_update_begin(&rstatc->bsync);
return rstatc;
}
-static void cgroup_cpu_stat_account_end(struct cgroup *cgrp,
- struct cgroup_rstat_cpu *rstatc)
+static void cgroup_base_stat_cputime_account_end(struct cgroup *cgrp,
+ struct cgroup_rstat_cpu *rstatc)
{
- u64_stats_update_end(&rstatc->sync);
+ u64_stats_update_end(&rstatc->bsync);
cgroup_rstat_cpu_updated(cgrp, smp_processor_id());
put_cpu_ptr(rstatc);
}
@@ -229,9 +229,9 @@ void __cgroup_account_cputime(struct cgroup *cgrp, u64 delta_exec)
{
struct cgroup_rstat_cpu *rstatc;
- rstatc = cgroup_cpu_stat_account_begin(cgrp);
- rstatc->cputime.sum_exec_runtime += delta_exec;
- cgroup_cpu_stat_account_end(cgrp, rstatc);
+ rstatc = cgroup_base_stat_cputime_account_begin(cgrp);
+ rstatc->bstat.cputime.sum_exec_runtime += delta_exec;
+ cgroup_base_stat_cputime_account_end(cgrp, rstatc);
}
void __cgroup_account_cputime_field(struct cgroup *cgrp,
@@ -239,26 +239,26 @@ void __cgroup_account_cputime_field(struct cgroup *cgrp,
{
struct cgroup_rstat_cpu *rstatc;
- rstatc = cgroup_cpu_stat_account_begin(cgrp);
+ rstatc = cgroup_base_stat_cputime_account_begin(cgrp);
switch (index) {
case CPUTIME_USER:
case CPUTIME_NICE:
- rstatc->cputime.utime += delta_exec;
+ rstatc->bstat.cputime.utime += delta_exec;
break;
case CPUTIME_SYSTEM:
case CPUTIME_IRQ:
case CPUTIME_SOFTIRQ:
- rstatc->cputime.stime += delta_exec;
+ rstatc->bstat.cputime.stime += delta_exec;
break;
default:
break;
}
- cgroup_cpu_stat_account_end(cgrp, rstatc);
+ cgroup_base_stat_cputime_account_end(cgrp, rstatc);
}
-void cgroup_stat_show_cputime(struct seq_file *seq)
+void cgroup_base_stat_cputime_show(struct seq_file *seq)
{
struct cgroup *cgrp = seq_css(seq)->cgroup;
u64 usage, utime, stime;
@@ -270,9 +270,8 @@ void cgroup_stat_show_cputime(struct seq_file *seq)
cgroup_rstat_flush_locked(cgrp);
- usage = cgrp->stat.cputime.sum_exec_runtime;
- cputime_adjust(&cgrp->stat.cputime, &cgrp->stat.prev_cputime,
- &utime, &stime);
+ usage = cgrp->bstat.cputime.sum_exec_runtime;
+ cputime_adjust(&cgrp->bstat.cputime, &cgrp->prev_cputime, &utime, &stime);
mutex_unlock(&cgroup_rstat_mutex);
@@ -302,11 +301,9 @@ int cgroup_rstat_init(struct cgroup *cgrp)
struct cgroup_rstat_cpu *rstatc = cgroup_rstat_cpu(cgrp, cpu);
rstatc->updated_children = cgrp;
- u64_stats_init(&rstatc->sync);
+ u64_stats_init(&rstatc->bsync);
}
- prev_cputime_init(&cgrp->stat.prev_cputime);
-
return 0;
}
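Taken together, the renamed rstat.c helpers implement a per-cpu accumulate-then-flush scheme: writers bump the per-cpu counters under the bsync u64_stats sequence counter, and the flush computes the delta against last_bstat, folds in anything children left in pending_bstat, and pushes the result into the cgroup's totals and the parent's pending_bstat. The userspace toy model below sketches just that arithmetic; the names (toy_cgroup, base_stat_flush) are mine, and the real locking (cgroup_rstat_mutex, the per-cpu raw spinlocks, the u64_stats seqcount) and the updated-cgroup tree walk are deliberately left out.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define NR_CPUS 2

/* Counterpart of struct cgroup_base_stat: cputime only, kept as plain u64s. */
struct base_stat {
	uint64_t utime;
	uint64_t stime;
	uint64_t sum_exec_runtime;
};

/* Per-cpu state: live counters plus the snapshot taken at the last flush. */
struct rstat_cpu {
	struct base_stat bstat;
	struct base_stat last_bstat;
};

struct toy_cgroup {
	struct toy_cgroup *parent;
	struct base_stat bstat;		/* flushed, hierarchy-wide totals */
	struct base_stat pending_bstat;	/* deltas queued by already-flushed children */
	struct rstat_cpu cpu[NR_CPUS];
};

static void base_stat_accumulate(struct base_stat *dst, const struct base_stat *src)
{
	dst->utime += src->utime;
	dst->stime += src->stime;
	dst->sum_exec_runtime += src->sum_exec_runtime;
}

/*
 * Mirrors the shape of cgroup_base_stat_flush(): take the delta since the
 * last flush on this cpu, fold in whatever children queued in pending_bstat,
 * then add the delta to this cgroup's totals and to the parent's pending.
 */
static void base_stat_flush(struct toy_cgroup *cgrp, int cpu)
{
	struct rstat_cpu *rc = &cgrp->cpu[cpu];
	struct base_stat delta = {
		.utime = rc->bstat.utime - rc->last_bstat.utime,
		.stime = rc->bstat.stime - rc->last_bstat.stime,
		.sum_exec_runtime = rc->bstat.sum_exec_runtime -
				    rc->last_bstat.sum_exec_runtime,
	};

	rc->last_bstat = rc->bstat;

	base_stat_accumulate(&delta, &cgrp->pending_bstat);
	memset(&cgrp->pending_bstat, 0, sizeof(cgrp->pending_bstat));

	base_stat_accumulate(&cgrp->bstat, &delta);
	if (cgrp->parent)
		base_stat_accumulate(&cgrp->parent->pending_bstat, &delta);
}

int main(void)
{
	struct toy_cgroup root = { 0 };
	struct toy_cgroup child = { .parent = &root };

	/* "Account" some runtime against the child on each cpu. */
	child.cpu[0].bstat.sum_exec_runtime += 1000;
	child.cpu[0].bstat.utime += 600;
	child.cpu[1].bstat.sum_exec_runtime += 500;
	child.cpu[1].bstat.stime += 500;

	/* Flush children before parents, as the real updated-tree walk does. */
	for (int cpu = 0; cpu < NR_CPUS; cpu++) {
		base_stat_flush(&child, cpu);
		base_stat_flush(&root, cpu);
	}

	printf("child usage=%" PRIu64 " root usage=%" PRIu64 "\n",
	       child.bstat.sum_exec_runtime, root.bstat.sum_exec_runtime);
	return 0;
}

Built with a plain cc, this prints matching child and root usage (1500 each), i.e. the hierarchical totalling that the flushed cgrp->bstat feeds into cgroup_base_stat_cputime_show().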