author     Johannes Berg <johannes.berg@intel.com>    2017-06-08 14:14:40 +0200
committer  Johannes Berg <johannes.berg@intel.com>    2017-06-08 14:14:45 +0200
commit     a43e61842ec55baa486d60eed2a19af67ba78b9f (patch)
tree       6c3c93f19e3933273b73b9edd16edc146ab18527 /kernel/fork.c
parent     5d473fedd17ae3a9f92fb35551e307d01459ea6a (diff)
parent     50dffe7fad6c156c2928e45c19ff7b86eb951f4c (diff)
Merge remote-tracking branch 'net-next/master' into mac80211-next
This brings in commit 7a7c0a6438b8 ("mac80211: fix TX aggregation
start/stop callback race") to allow the follow-up cleanup.
Signed-off-by: Johannes Berg <johannes.berg@intel.com>
Diffstat (limited to 'kernel/fork.c')
-rw-r--r--  kernel/fork.c  25
1 file changed, 18 insertions, 7 deletions
diff --git a/kernel/fork.c b/kernel/fork.c
index 06d759ab4c62..e53770d2bf95 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1577,6 +1577,18 @@ static __latent_entropy struct task_struct *copy_process(
         if (!p)
                 goto fork_out;
 
+        /*
+         * This _must_ happen before we call free_task(), i.e. before we jump
+         * to any of the bad_fork_* labels. This is to avoid freeing
+         * p->set_child_tid which is (ab)used as a kthread's data pointer for
+         * kernel threads (PF_KTHREAD).
+         */
+        p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
+        /*
+         * Clear TID on mm_release()?
+         */
+        p->clear_child_tid = (clone_flags & CLONE_CHILD_CLEARTID) ? child_tidptr : NULL;
+
         ftrace_graph_init_task(p);
 
         rt_mutex_init_task(p);
@@ -1743,11 +1755,6 @@ static __latent_entropy struct task_struct *copy_process(
                 }
         }
 
-        p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
-        /*
-         * Clear TID on mm_release()?
-         */
-        p->clear_child_tid = (clone_flags & CLONE_CHILD_CLEARTID) ? child_tidptr : NULL;
 #ifdef CONFIG_BLOCK
         p->plug = NULL;
 #endif
@@ -1845,11 +1852,13 @@ static __latent_entropy struct task_struct *copy_process(
         */
         recalc_sigpending();
         if (signal_pending(current)) {
-                spin_unlock(&current->sighand->siglock);
-                write_unlock_irq(&tasklist_lock);
                 retval = -ERESTARTNOINTR;
                 goto bad_fork_cancel_cgroup;
         }
+        if (unlikely(!(ns_of_pid(pid)->nr_hashed & PIDNS_HASH_ADDING))) {
+                retval = -ENOMEM;
+                goto bad_fork_cancel_cgroup;
+        }
 
         if (likely(p->pid)) {
                 ptrace_init_task(p, (clone_flags & CLONE_PTRACE) || trace);
@@ -1907,6 +1916,8 @@ static __latent_entropy struct task_struct *copy_process(
         return p;
 
 bad_fork_cancel_cgroup:
+        spin_unlock(&current->sighand->siglock);
+        write_unlock_irq(&tasklist_lock);
         cgroup_cancel_fork(p);
 bad_fork_free_pid:
         cgroup_threadgroup_change_end(current);
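For readers following the first hunk: the added comment documents an ordering constraint. Because the new task starts out as a byte-for-byte copy of the parent, p->set_child_tid still aliases the parent's value until it is re-initialized, and for a kernel thread (PF_KTHREAD) that field is (ab)used as a data pointer; an early goto to a bad_fork_* label that reaches free_task() would therefore free memory still owned by the parent. The userspace sketch below models only that hazard with invented names (task, task_dup, task_free, overloaded_ptr); it is an illustration of the pattern, not kernel code.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Toy stand-in for task_struct: one field is "overloaded" the way
 * set_child_tid is (ab)used as a kthread data pointer. */
struct task {
        char *overloaded_ptr;   /* the parent points this at private data */
};

static struct task *task_dup(const struct task *parent)
{
        struct task *p = malloc(sizeof(*p));
        if (p)
                memcpy(p, parent, sizeof(*p));  /* child starts as a byte copy */
        return p;
}

/* Models free_task(): trusts overloaded_ptr to be the child's own data. */
static void task_free(struct task *p)
{
        free(p->overloaded_ptr);
        free(p);
}

static int copy_process_sketch(const struct task *parent, int fail_early)
{
        struct task *p = task_dup(parent);
        if (!p)
                return -1;

        /*
         * Mirror of the moved assignments: reset the overloaded field
         * before any error path can call task_free(), so we never free
         * memory that still belongs to the parent.
         */
        p->overloaded_ptr = NULL;

        if (fail_early)
                goto bad_fork;  /* any failure from here on is now safe */

        p->overloaded_ptr = strdup("child-owned data");
        printf("child set up: %s\n", p->overloaded_ptr);
        task_free(p);
        return 0;

bad_fork:
        task_free(p);   /* frees only child-owned memory (NULL is fine) */
        return -1;
}

int main(void)
{
        struct task parent = { .overloaded_ptr = strdup("parent kthread data") };

        copy_process_sketch(&parent, 1);        /* early failure: parent data survives */
        printf("parent data intact: %s\n", parent.overloaded_ptr);

        copy_process_sketch(&parent, 0);        /* normal path */
        free(parent.overloaded_ptr);
        return 0;
}

The last two hunks apply a similar "make the error label self-sufficient" idea to locking: rather than the signal_pending() failure site dropping current->sighand->siglock and tasklist_lock inline, the unlocks move to the shared bad_fork_cancel_cgroup label, so the newly added PIDNS_HASH_ADDING failure path releases both locks on its way out as well.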