Diffstat (limited to 'kernel/cgroup')
-rw-r--r--  kernel/cgroup/legacy_freezer.c | 11
-rw-r--r--  kernel/cgroup/rstat.c          | 25
2 files changed, 18 insertions(+), 18 deletions(-)
diff --git a/kernel/cgroup/legacy_freezer.c b/kernel/cgroup/legacy_freezer.c
index 039d1eb2f215..dd9417425d92 100644
--- a/kernel/cgroup/legacy_freezer.c
+++ b/kernel/cgroup/legacy_freezer.c
@@ -66,15 +66,9 @@ static struct freezer *parent_freezer(struct freezer *freezer)
bool cgroup_freezing(struct task_struct *task)
{
bool ret;
- unsigned int state;
rcu_read_lock();
- /* Check if the cgroup is still FREEZING, but not FROZEN. The extra
- * !FROZEN check is required, because the FREEZING bit is not cleared
- * when the state FROZEN is reached.
- */
- state = task_freezer(task)->state;
- ret = (state & CGROUP_FREEZING) && !(state & CGROUP_FROZEN);
+ ret = task_freezer(task)->state & CGROUP_FREEZING;
rcu_read_unlock();
return ret;
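
Dropping the !FROZEN test changes the predicate's meaning: cgroup_freezing() now reports true for a task whose cgroup has already reached FROZEN, because (as the removed comment noted) the FREEZING bit is not cleared when FROZEN is reached. A minimal standalone sketch of the old versus new logic; the flag values below are illustrative, not the kernel's definitions:

#include <stdbool.h>
#include <stdio.h>

#define FREEZING_BIT 0x2        /* stands in for CGROUP_FREEZING */
#define FROZEN_BIT   0x8        /* stands in for CGROUP_FROZEN */

static bool freezing_old(unsigned int state)
{
        return (state & FREEZING_BIT) && !(state & FROZEN_BIT);
}

static bool freezing_new(unsigned int state)
{
        return state & FREEZING_BIT;
}

int main(void)
{
        /* FREEZING stays set after the cgroup reaches FROZEN */
        unsigned int state = FREEZING_BIT | FROZEN_BIT;

        /* prints "old: 0 new: 1" */
        printf("old: %d new: %d\n", freezing_old(state), freezing_new(state));
        return 0;
}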
@@ -188,13 +182,12 @@ static void freezer_attach(struct cgroup_taskset *tset)
if (!(freezer->state & CGROUP_FREEZING)) {
__thaw_task(task);
} else {
- freeze_task(task);
-
/* clear FROZEN and propagate upwards */
while (freezer && (freezer->state & CGROUP_FROZEN)) {
freezer->state &= ~CGROUP_FROZEN;
freezer = parent_freezer(freezer);
}
+ freeze_task(task);
}
}
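
Note the reordering in freezer_attach(): FROZEN is now cleared along the ancestor chain before freeze_task() is called on the incoming task, rather than after. A simplified sketch of the upward flag-clearing walk, with struct node standing in for the kernel's struct freezer:

struct node {
        unsigned int state;
        struct node *parent;
};

static void clear_flag_upwards(struct node *n, unsigned int flag)
{
        /* stop at the root or at the first ancestor without the flag;
         * a cgroup is only FROZEN if all of its descendants are, so no
         * higher ancestor can still have it set */
        while (n && (n->state & flag)) {
                n->state &= ~flag;
                n = n->parent;
        }
}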
diff --git a/kernel/cgroup/rstat.c b/kernel/cgroup/rstat.c
index ce4752ab9e09..cbeaa499a96a 100644
--- a/kernel/cgroup/rstat.c
+++ b/kernel/cgroup/rstat.c
@@ -47,8 +47,20 @@ static spinlock_t *ss_rstat_lock(struct cgroup_subsys *ss)
static raw_spinlock_t *ss_rstat_cpu_lock(struct cgroup_subsys *ss, int cpu)
{
- if (ss)
+ if (ss) {
+ /*
+ * Depending on config, the subsystem per-cpu lock type may be an
+ * empty struct. In environments where this is the case, allocation
+ * of this field is not performed in ss_rstat_init(). Avoid a
+ * cpu-based offset relative to NULL by returning early. When the
+ * lock type is zero in size, the corresponding lock functions are
+ * no-ops so passing them NULL is acceptable.
+ */
+ if (sizeof(*ss->rstat_ss_cpu_lock) == 0)
+ return NULL;
+
return per_cpu_ptr(ss->rstat_ss_cpu_lock, cpu);
+ }
return per_cpu_ptr(&rstat_base_cpu_lock, cpu);
}
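
The early return matters because per_cpu_ptr() forms a per-CPU address by offsetting the given base pointer; when the lock type is zero-sized, ss_rstat_init() never allocates rstat_ss_cpu_lock, and offsetting the resulting NULL base is exactly the hazard the comment describes. A userspace analogue of the guard (cpu_slot() is a made-up helper, not a kernel API):

#include <stddef.h>

struct lock { };        /* GNU C: an empty struct has sizeof == 0 */

static struct lock *cpu_slot(struct lock *base, size_t cpu)
{
        /* nothing was allocated for the zero-size type, so base is
         * NULL; bail out rather than do NULL-relative arithmetic */
        if (sizeof(*base) == 0)
                return NULL;
        return base + cpu;
}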
@@ -510,20 +522,15 @@ int __init ss_rstat_init(struct cgroup_subsys *ss)
{
int cpu;
-#ifdef CONFIG_SMP
/*
- * On uniprocessor machines, arch_spinlock_t is defined as an empty
- * struct. Avoid allocating a size of zero by having this block
- * excluded in this case. It's acceptable to leave the subsystem locks
- * unitialized since the associated lock functions are no-ops in the
- * non-smp case.
+ * Depending on config, the subsystem per-cpu lock type may be an empty
+ * struct. Avoid allocating a size of zero in this case.
*/
- if (ss) {
+ if (ss && sizeof(*ss->rstat_ss_cpu_lock)) {
ss->rstat_ss_cpu_lock = alloc_percpu(raw_spinlock_t);
if (!ss->rstat_ss_cpu_lock)
return -ENOMEM;
}
-#endif
spin_lock_init(ss_rstat_lock(ss));
for_each_possible_cpu(cpu)
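
Replacing the #ifdef CONFIG_SMP block with a sizeof() test keeps the allocation path compiled in every configuration while still letting the compiler discard it when the lock type is empty. A standalone sketch of the pattern; lock_t and alloc_locks() are illustrative, the kernel itself uses alloc_percpu():

#include <stdlib.h>

typedef struct { } lock_t;      /* zero-sized here, as on !CONFIG_SMP */

static lock_t *alloc_locks(size_t ncpu)
{
        /* sizeof(lock_t) is a compile-time constant, so the compiler
         * folds this branch away entirely when the type is empty */
        if (sizeof(lock_t) == 0)
                return NULL;
        return calloc(ncpu, sizeof(lock_t));
}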