Diffstat (limited to 'kernel')
-rw-r--r--	kernel/cgroup.c	12
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 771d1b8aaae9..8ab800c7bac0 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -1331,9 +1331,13 @@ static void cgroup_enable_task_cg_lists(void)
 		 * We should check if the process is exiting, otherwise
 		 * it will race with cgroup_exit() in that the list
 		 * entry won't be deleted though the process has exited.
+		 * Do it while holding siglock so that we don't end up
+		 * racing against cgroup_exit().
 		 */
+		spin_lock_irq(&p->sighand->siglock);
 		if (!(p->flags & PF_EXITING))
 			list_add(&p->cg_list, &task_css_set(p)->tasks);
+		spin_unlock_irq(&p->sighand->siglock);
 
 		task_unlock(p);
 	} while_each_thread(g, p);
@@ -3968,16 +3972,12 @@ static int __init cgroup_wq_init(void)
 	/*
 	 * There isn't much point in executing destruction path in
 	 * parallel.  Good chunk is serialized with cgroup_mutex anyway.
-	 *
-	 * XXX: Must be ordered to make sure parent is offlined after
-	 * children.  The ordering requirement is for memcg where a
-	 * parent's offline may wait for a child's leading to deadlock.  In
-	 * the long term, this should be fixed from memcg side.
+	 * Use 1 for @max_active.
 	 *
 	 * We would prefer to do this in cgroup_init() above, but that
 	 * is called before init_workqueues(): so leave this until after.
 	 */
-	cgroup_destroy_wq = alloc_ordered_workqueue("cgroup_destroy", 0);
+	cgroup_destroy_wq = alloc_workqueue("cgroup_destroy", 0, 1);
 	BUG_ON(!cgroup_destroy_wq);
 
 	/*
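The first hunk wraps the PF_EXITING check and the list_add() in cgroup_enable_task_cg_lists() in the task's sighand->siglock, per the added comment, so the check-then-add sequence cannot race with cgroup_exit(): without the lock, a task could enter its exit path between the check and the add and leave a stale list entry behind. Below is a minimal userspace analogue of that check-under-lock pattern (pthreads, hypothetical names; a sketch, not the kernel code):

/*
 * Userspace analogue (hypothetical names) of the pattern in the first hunk:
 * the "is this task exiting?" test and the list insertion must happen under
 * the same lock the exit path takes, or the add can race with the removal.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct task {
	bool exiting;          /* analogue of PF_EXITING          */
	bool on_list;          /* analogue of the cg_list linkage */
	pthread_mutex_t lock;  /* analogue of sighand->siglock    */
};

/* Exit path: marks the task as exiting and unlinks it, under the lock. */
static void task_exit(struct task *t)
{
	pthread_mutex_lock(&t->lock);
	t->exiting = true;
	t->on_list = false;              /* list_del() in the real code */
	pthread_mutex_unlock(&t->lock);
}

/* Enable path: only link tasks that are not already exiting. */
static void enable_task_list(struct task *t)
{
	pthread_mutex_lock(&t->lock);
	if (!t->exiting)
		t->on_list = true;       /* list_add() in the real code */
	pthread_mutex_unlock(&t->lock);
	/*
	 * If the check ran outside the lock, task_exit() could execute
	 * between the check and the add, leaving a stale entry behind.
	 */
}

int main(void)
{
	struct task t = { .lock = PTHREAD_MUTEX_INITIALIZER };

	enable_task_list(&t);
	task_exit(&t);
	printf("on_list=%d exiting=%d\n", t.on_list, t.exiting);
	return 0;
}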

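The second hunk switches cgroup_destroy_wq from an ordered workqueue to a plain one with @max_active = 1, dropping the memcg ordering requirement described in the removed XXX comment. For reference, alloc_ordered_workqueue() also guarantees that items execute strictly in queueing order, whereas alloc_workqueue(name, 0, 1) only caps the number of in-flight items per CPU and gives no global ordering guarantee. The following is an illustrative kernel-module sketch (hypothetical names, not part of this change) using the same call shape as the hunk:

/*
 * Illustrative module sketch: a plain workqueue with @max_active = 1,
 * matching the allocation the second hunk switches to.
 */
#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *demo_wq;

static void demo_work_fn(struct work_struct *work)
{
	pr_info("demo work ran\n");
}

static DECLARE_WORK(demo_work, demo_work_fn);

static int __init demo_init(void)
{
	/* Same shape as the hunk: name, flags = 0, max_active = 1. */
	demo_wq = alloc_workqueue("demo_wq", 0, 1);
	if (!demo_wq)
		return -ENOMEM;

	queue_work(demo_wq, &demo_work);
	return 0;
}

static void __exit demo_exit(void)
{
	/* Flushes pending work before freeing the workqueue. */
	destroy_workqueue(demo_wq);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");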