author     Oleg Nesterov <oleg@tv-sign.ru>          2006-03-28 16:11:16 -0800
committer  Linus Torvalds <torvalds@g5.osdl.org>    2006-03-28 18:36:42 -0800
commit     6b3934ef52712ece50605dfc72e55d00c580831a (patch)
tree       5ec3c4f69a20880f75de6ff8d7d2f67d96328df3 /kernel
parent     7001510d0cbf51ad202dd2d0744f54104285cbb9 (diff)
[PATCH] copy_process: cleanup bad_fork_cleanup_signal
__exit_signal() does important cleanups atomically under ->siglock. It is also called from copy_process's error path. This is not good; for example, we can't move __unhash_process() under ->siglock for that reason.

We should not mix these two paths; just look at the ugly 'if (p->sighand)' check under the 'bad_fork_cleanup_sighand:' label. For the copy_process() case it is sufficient to just back out copy_signal(), nothing more. Again, nobody can see this task yet.

For the CLONE_THREAD case we just decrement signal->count; otherwise nobody else can see this ->signal and we can free it locklessly.

This patch assumes it is safe to do exit_thread_group_keys() without tasklist_lock.

Signed-off-by: Oleg Nesterov <oleg@tv-sign.ru>
Cc: "Eric W. Biederman" <ebiederm@xmission.com>
Acked-by: David Howells <dhowells@redhat.com>
Signed-off-by: Adrian Bunk <bunk@stusta.de>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
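The error-path change reduces to a plain reference-count backout on the shared signal state. Below is a minimal user-space sketch of that idea, assuming only what the message states (the half-constructed task is not yet visible to anyone else, so the last reference can be freed without a lock). fake_signal_struct, fake_cleanup_signal() and the C11 atomics are illustrative stand-ins for signal_struct, cleanup_signal()/__cleanup_signal() and the kernel's atomic_dec()/atomic_dec_and_test(); this is not the kernel code itself.

    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Stand-in for signal_struct: only the two counters the backout touches. */
    struct fake_signal_struct {
            atomic_int count;       /* references held by CLONE_THREAD siblings */
            atomic_int live;        /* live threads in the group */
    };

    /* Stand-in for __cleanup_signal(): free the shared state, no lock needed. */
    static void fake_cleanup_signal_free(struct fake_signal_struct *sig)
    {
            free(sig);              /* kernel: exit_thread_group_keys() + kmem_cache_free() */
    }

    /* Stand-in for cleanup_signal(): back out copy_signal() on the error path. */
    static void fake_cleanup_signal(struct fake_signal_struct *sig)
    {
            atomic_fetch_sub(&sig->live, 1);
            /* like atomic_dec_and_test(): fetch_sub returning 1 means we dropped the last reference */
            if (atomic_fetch_sub(&sig->count, 1) == 1)
                    fake_cleanup_signal_free(sig);
    }

    int main(void)
    {
            /* Non-CLONE_THREAD fork: copy_signal() allocated a fresh struct with one reference. */
            struct fake_signal_struct *sig = malloc(sizeof(*sig));

            atomic_init(&sig->count, 1);
            atomic_init(&sig->live, 1);

            /* copy_process() fails after copy_signal() succeeded: just back it out. */
            fake_cleanup_signal(sig);
            puts("signal_struct backed out and freed locklessly");
            return 0;
    }

In the CLONE_THREAD case the count starts above one, so the same call simply drops one reference and leaves the structure in place for the remaining threads.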
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/fork.c    | 23
-rw-r--r--  kernel/signal.c  | 15
2 files changed, 20 insertions(+), 18 deletions(-)
diff --git a/kernel/fork.c b/kernel/fork.c
index 8a46ad52be8f..0aff28cdbadd 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -84,7 +84,7 @@ static kmem_cache_t *task_struct_cachep;
#endif
/* SLAB cache for signal_struct structures (tsk->signal) */
-kmem_cache_t *signal_cachep;
+static kmem_cache_t *signal_cachep;
/* SLAB cache for sighand_struct structures (tsk->sighand) */
kmem_cache_t *sighand_cachep;
@@ -872,6 +872,22 @@ static inline int copy_signal(unsigned long clone_flags, struct task_struct * ts
return 0;
}
+void __cleanup_signal(struct signal_struct *sig)
+{
+ exit_thread_group_keys(sig);
+ kmem_cache_free(signal_cachep, sig);
+}
+
+static inline void cleanup_signal(struct task_struct *tsk)
+{
+ struct signal_struct *sig = tsk->signal;
+
+ atomic_dec(&sig->live);
+
+ if (atomic_dec_and_test(&sig->count))
+ __cleanup_signal(sig);
+}
+
static inline void copy_flags(unsigned long clone_flags, struct task_struct *p)
{
unsigned long new_flags = p->flags;
@@ -1206,10 +1222,9 @@ bad_fork_cleanup_mm:
if (p->mm)
mmput(p->mm);
bad_fork_cleanup_signal:
- exit_signal(p);
+ cleanup_signal(p);
bad_fork_cleanup_sighand:
- if (p->sighand)
- __exit_sighand(p);
+ __exit_sighand(p);
bad_fork_cleanup_fs:
exit_fs(p); /* blocking */
bad_fork_cleanup_files:
diff --git a/kernel/signal.c b/kernel/signal.c
index 1d7f4463c32d..54e9ef673e68 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -395,23 +395,10 @@ void __exit_signal(struct task_struct *tsk)
clear_tsk_thread_flag(tsk,TIF_SIGPENDING);
flush_sigqueue(&tsk->pending);
if (sig) {
- /*
- * We are cleaning up the signal_struct here.
- */
- exit_thread_group_keys(sig);
- kmem_cache_free(signal_cachep, sig);
+ __cleanup_signal(sig);
}
}
-void exit_signal(struct task_struct *tsk)
-{
- atomic_dec(&tsk->signal->live);
-
- write_lock_irq(&tasklist_lock);
- __exit_signal(tsk);
- write_unlock_irq(&tasklist_lock);
-}
-
/*
* Flush all handlers for a task.
*/