author		Tejun Heo <tj@kernel.org>	2015-09-11 15:00:19 -0400
committer	Tejun Heo <tj@kernel.org>	2015-09-22 12:46:53 -0400
commit		4530eddb59494b89650d6bcd980fc7f7717ad80c (patch)
tree		0f6212afa2e7e0e4724c5f12644508f1a0baf138 /kernel
parent		3df9ca0a2b8b50db5a079ae9d97c5b55435e9a6c (diff)
cgroup, memcg, cpuset: implement cgroup_taskset_for_each_leader()
It wasn't explicitly documented but, when a process is being migrated,
cpuset and memcg depend on cgroup_taskset_first() returning the
threadgroup leader; however, this approach is somewhat fragile and
would no longer work for the planned multi-process migration.

This patch introduces an explicit cgroup_taskset_for_each_leader()
which iterates over only the threadgroup leaders, and replaces the
cgroup_taskset_first() usages that were accessing the leader with it.
This prepares both memcg and cpuset for multi-process migration.

This patch also updates the documentation for cgroup_taskset_for_each()
to clarify the iteration rules and removes comments mentioning task
ordering in tasksets.

v2: A previous patch which added a threadgroup leader test was dropped.
    Patch updated accordingly.

Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: Zefan Li <lizefan@huawei.com>
Acked-by: Michal Hocko <mhocko@suse.cz>
Cc: Johannes Weiner <hannes@cmpxchg.org>
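For reference, a leaders-only walk of a taskset can be expressed as a thin
filter over the existing cgroup_taskset_first()/cgroup_taskset_next()
iterators and thread_group_leader(). The following is only a sketch of that
idea and is not necessarily the exact macro introduced by this patch:

/*
 * Sketch: visit every task in the taskset but run the caller's loop
 * body only for threadgroup leaders.  The if/else keeps the construct
 * usable like a normal for-style iterator.
 */
#define cgroup_taskset_for_each_leader(leader, tset)			\
	for ((leader) = cgroup_taskset_first((tset)); (leader);	\
	     (leader) = cgroup_taskset_next((tset)))			\
		if (!thread_group_leader(leader))			\
			;						\
		else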
Diffstat (limited to 'kernel')
 kernel/cgroup.c | 11 -----------
 kernel/cpuset.c |  9 ++++-----
 2 files changed, 4 insertions(+), 16 deletions(-)
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 0be276ffe08a..7f4b85af03dc 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -2217,13 +2217,6 @@ static void cgroup_task_migrate(struct cgroup *old_cgrp,
 	get_css_set(new_cset);
 	rcu_assign_pointer(tsk->cgroups, new_cset);
-
-	/*
-	 * Use move_tail so that cgroup_taskset_first() still returns the
-	 * leader after migration.  This works because cgroup_migrate()
-	 * ensures that the dst_cset of the leader is the first on the
-	 * tset's dst_csets list.
-	 */
 	list_move_tail(&tsk->cg_list, &new_cset->mg_tasks);
 
 	/*
@@ -2419,10 +2412,6 @@ static int cgroup_migrate(struct cgroup *cgrp, struct task_struct *leader,
 		if (!cset->mg_src_cgrp)
 			goto next;
 
-		/*
-		 * cgroup_taskset_first() must always return the leader.
-		 * Take care to avoid disturbing the ordering.
-		 */
 		list_move_tail(&task->cg_list, &cset->mg_tasks);
 		if (list_empty(&cset->mg_node))
 			list_add_tail(&cset->mg_node, &tset.src_csets);
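With the comments above gone, the leader-first ordering of tasksets is no
longer documented or relied upon. As an illustration only (example_attach()
is a hypothetical callback, not part of this patch), a controller's
->attach() method now picks the iterator that matches the scope of its work:

#include <linux/cgroup.h>
#include <linux/sched.h>

static void example_attach(struct cgroup_subsys_state *css,
			   struct cgroup_taskset *tset)
{
	struct task_struct *task;
	struct task_struct *leader;

	/* per-thread state: walk every task being migrated */
	cgroup_taskset_for_each(task, tset) {
		/* e.g. update a per-task field */
	}

	/*
	 * per-process state (e.g. anything keyed off the mm):
	 * walk only the threadgroup leaders
	 */
	cgroup_taskset_for_each_leader(leader, tset) {
		/* e.g. operate on the process once, via its leader */
	}
}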
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 0b361a0b58f6..e4d999929903 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -1488,7 +1488,7 @@ static void cpuset_attach(struct cgroup_subsys_state *css,
 	/* static buf protected by cpuset_mutex */
 	static nodemask_t cpuset_attach_nodemask_to;
 	struct task_struct *task;
-	struct task_struct *leader = cgroup_taskset_first(tset);
+	struct task_struct *leader;
 	struct cpuset *cs = css_cs(css);
 	struct cpuset *oldcs = cpuset_attach_old_cs;
 
@@ -1514,12 +1514,11 @@ static void cpuset_attach(struct cgroup_subsys_state *css,
 	}
 
 	/*
-	 * Change mm, possibly for multiple threads in a threadgroup. This
-	 * is expensive and may sleep and should be moved outside migration
-	 * path proper.
+	 * Change mm for all threadgroup leaders. This is expensive and may
+	 * sleep and should be moved outside migration path proper.
 	 */
 	cpuset_attach_nodemask_to = cs->effective_mems;
-	if (thread_group_leader(leader)) {
+	cgroup_taskset_for_each_leader(leader, tset) {
 		struct mm_struct *mm = get_task_mm(leader);
 
 		if (mm) {
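Purely as an illustration (a sketch, not the remainder of the upstream hunk;
the real cpuset_attach() also handles optional memory migration before
releasing the mm), the leaders-only loop typically ends by rebinding the
process' memory policy and dropping the mm reference:

	cgroup_taskset_for_each_leader(leader, tset) {
		/* get_task_mm() returns NULL for kernel threads */
		struct mm_struct *mm = get_task_mm(leader);

		if (mm) {
			/* rebind the process' mempolicy to the new nodemask */
			mpol_rebind_mm(mm, &cpuset_attach_nodemask_to);
			/* drop the reference taken by get_task_mm() */
			mmput(mm);
		}
	}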