path: root/kernel/cgroup.c
author     Tejun Heo <tj@kernel.org>    2014-02-25 09:56:49 -0500
committer  Tejun Heo <tj@kernel.org>    2014-02-25 09:56:49 -0500
commit     f153ad11bca27996a5e8e1782557e36e80b03a8c (patch)
tree       df87bf56d2c7fb6d7b3122ed838ce652337b08a3 /kernel/cgroup.c
parent     dc5736ed7aaf942caaac0c15af74a018e04ec79d (diff)
parent     532de3fc72adc2a6525c4d53c07bf81e1732083d (diff)
Merge branch 'cgroup/for-3.14-fixes' into cgroup/for-3.15
Pull in for-3.14-fixes to receive 532de3fc72ad ("cgroup: update
cgroup_enable_task_cg_lists() to grab siglock"), which conflicts with
afeb0f9fd425 ("cgroup: relocate cgroup_enable_task_cg_lists()") and the
following cg_lists updates.  This is likely to cause further conflicts
down the line too, so let's merge it early.

As cgroup_enable_task_cg_lists() is relocated in for-3.15, this merge
causes a conflict at the original position.  It's resolved by applying
the siglock changes to the updated version in the new location.

Conflicts:
	kernel/cgroup.c

Signed-off-by: Tejun Heo <tj@kernel.org>
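The race that the pulled siglock fix closes can be pictured with a small
userspace analogue.  The sketch below is illustrative only; the names
(fake_task, exit_lock, enable_cg_lists, task_exit) are invented and none
of this is kernel code.  It shows the pattern the fix relies on: the
walker must test the "exiting" flag and link the entry under the same
lock the exit path takes before marking itself exiting, otherwise the
task can finish exiting between the test and the link and leave behind a
stale list entry that nothing will ever remove.

/*
 * Hypothetical userspace analogue of the race closed by the pulled fix.
 * All names are invented for illustration; build with "cc -pthread".
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct fake_task {
	bool exiting;			/* stands in for PF_EXITING          */
	bool on_list;			/* stands in for the cg_list linkage */
	pthread_mutex_t exit_lock;	/* stands in for sighand->siglock    */
};

static struct fake_task task = {
	.exit_lock = PTHREAD_MUTEX_INITIALIZER,
};

/* Exit path: mark the task exiting, then tear down its list entry. */
static void *task_exit(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&task.exit_lock);
	task.exiting = true;
	pthread_mutex_unlock(&task.exit_lock);

	/* After this point nothing will ever unlink the task again. */
	task.on_list = false;
	return NULL;
}

/* Walker: link the task unless it is already exiting. */
static void *enable_cg_lists(void *arg)
{
	(void)arg;
	/*
	 * Test-and-link under the same lock the exit path uses to set
	 * the exiting flag.  Without the lock, the task could finish
	 * exiting between the test and the assignment, and the entry
	 * added afterwards would never be removed.
	 */
	pthread_mutex_lock(&task.exit_lock);
	if (!task.exiting)
		task.on_list = true;
	pthread_mutex_unlock(&task.exit_lock);
	return NULL;
}

int main(void)
{
	pthread_t exiter, walker;

	pthread_create(&exiter, NULL, task_exit, NULL);
	pthread_create(&walker, NULL, enable_cg_lists, NULL);
	pthread_join(exiter, NULL);
	pthread_join(walker, NULL);

	/* A leftover entry here is the stale-entry case the fix prevents. */
	printf("entry leaked: %s\n", task.on_list ? "yes" : "no");
	return 0;
}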
Diffstat (limited to 'kernel/cgroup.c')
-rw-r--r--   kernel/cgroup.c   12
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 771d1b8aaae9..8ab800c7bac0 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -1331,9 +1331,13 @@ static void cgroup_enable_task_cg_lists(void)
 		 * We should check if the process is exiting, otherwise
 		 * it will race with cgroup_exit() in that the list
 		 * entry won't be deleted though the process has exited.
+		 * Do it while holding siglock so that we don't end up
+		 * racing against cgroup_exit().
 		 */
+		spin_lock_irq(&p->sighand->siglock);
 		if (!(p->flags & PF_EXITING))
 			list_add(&p->cg_list, &task_css_set(p)->tasks);
+		spin_unlock_irq(&p->sighand->siglock);
 		task_unlock(p);
 	} while_each_thread(g, p);
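As the merge description above notes, cgroup_enable_task_cg_lists() has
been relocated in for-3.15, so the same siglock section is applied to
the relocated copy during conflict resolution.  The effect is the same
either way: the PF_EXITING test and the list_add() are now serialized
against the exit path by p->sighand->siglock, so a task can no longer
slip past the test, run cgroup_exit(), and then be added to the css_set
task list afterwards.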
@@ -3968,16 +3972,12 @@ static int __init cgroup_wq_init(void)
 	/*
 	 * There isn't much point in executing destruction path in
 	 * parallel.  Good chunk is serialized with cgroup_mutex anyway.
-	 *
-	 * XXX: Must be ordered to make sure parent is offlined after
-	 * children.  The ordering requirement is for memcg where a
-	 * parent's offline may wait for a child's leading to deadlock.  In
-	 * the long term, this should be fixed from memcg side.
+	 * Use 1 for @max_active.
 	 *
 	 * We would prefer to do this in cgroup_init() above, but that
 	 * is called before init_workqueues(): so leave this until after.
 	 */
-	cgroup_destroy_wq = alloc_ordered_workqueue("cgroup_destroy", 0);
+	cgroup_destroy_wq = alloc_workqueue("cgroup_destroy", 0, 1);
 	BUG_ON(!cgroup_destroy_wq);
 
 	/*
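A note on the second hunk, hedged since the reasoning is only partly
spelled out in the removed comment: alloc_ordered_workqueue() gives a
single-threaded workqueue that executes items strictly in queueing
order, which the dropped XXX comment wanted only as a workaround for a
memcg parent/child offline dependency.  Switching to
alloc_workqueue("cgroup_destroy", 0, 1) keeps destruction work capped at
one in-flight item (per CPU, since no WQ_UNBOUND flag is passed) but no
longer promises strict ordering, matching the new "Use 1 for
@max_active" comment.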