author     Eric W. Biederman <ebiederm@xmission.com>   2021-09-01 11:23:38 -0500
committer  Eric W. Biederman <ebiederm@xmission.com>   2021-10-06 11:28:21 -0500
commit     d67e03e361619b20c51aaef3b7dd1497617c371d (patch)
tree       1133d040900e3d9956e0ecfcffaa75c2f58a43b5 /kernel/exit.c
parent     7e3c4fb7fc19bcf20657de3edb718ec1b26c7df3 (diff)
exit: Factor coredump_exit_mm out of exit_mm
Separate the coredump logic from the ordinary exit_mm logic by moving
the coredump logic out of exit_mm into its own function,
coredump_exit_mm.

Link: https://lkml.kernel.org/r/87a6k2x277.fsf@disp2133
Reviewed-by: Kees Cook <keescook@chromium.org>
Signed-off-by: "Eric W. Biederman" <ebiederm@xmission.com>
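For quick orientation, the net effect of the patch is that the open-coded
coredump wait in exit_mm() collapses into a single helper call. A condensed
sketch of exit_mm() as it looks after this patch, assembled from the hunks
below (the second hunk is truncated on this page, so later statements are
elided):

	static void exit_mm(void)
	{
		struct mm_struct *mm = current->mm;

		exit_mm_release(current, mm);
		if (!mm)
			return;
		sync_mm_rss(mm);
		mmap_read_lock(mm);
		/* May drop and retake mmap_lock while waiting for a dump. */
		coredump_exit_mm(mm);
		mmgrab(mm);
		BUG_ON(mm != current->active_mm);
		/* more a memory barrier than a real lock */
		/* ... remainder of exit_mm() is unchanged by this patch ... */
	}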
Diffstat (limited to 'kernel/exit.c')
-rw-r--r--  kernel/exit.c  76
1 file changed, 41 insertions(+), 35 deletions(-)
diff --git a/kernel/exit.c b/kernel/exit.c
index 91a43e57a32e..cb1619d8fd64 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -339,6 +339,46 @@ kill_orphaned_pgrp(struct task_struct *tsk, struct task_struct *parent)
 	}
 }
 
+static void coredump_exit_mm(struct mm_struct *mm)
+{
+	struct core_state *core_state;
+
+	/*
+	 * Serialize with any possible pending coredump.
+	 * We must hold mmap_lock around checking core_state
+	 * and clearing tsk->mm.  The core-inducing thread
+	 * will increment ->nr_threads for each thread in the
+	 * group with ->mm != NULL.
+	 */
+	core_state = mm->core_state;
+	if (core_state) {
+		struct core_thread self;
+
+		mmap_read_unlock(mm);
+
+		self.task = current;
+		if (self.task->flags & PF_SIGNALED)
+			self.next = xchg(&core_state->dumper.next, &self);
+		else
+			self.task = NULL;
+		/*
+		 * Implies mb(), the result of xchg() must be visible
+		 * to core_state->dumper.
+		 */
+		if (atomic_dec_and_test(&core_state->nr_threads))
+			complete(&core_state->startup);
+
+		for (;;) {
+			set_current_state(TASK_UNINTERRUPTIBLE);
+			if (!self.task) /* see coredump_finish() */
+				break;
+			freezable_schedule();
+		}
+		__set_current_state(TASK_RUNNING);
+		mmap_read_lock(mm);
+	}
+}
+
 #ifdef CONFIG_MEMCG
 /*
  * A task is exiting.  If it owned this mm, find a new owner for the mm.
@@ -434,47 +474,13 @@ assign_new_owner:
 static void exit_mm(void)
 {
 	struct mm_struct *mm = current->mm;
-	struct core_state *core_state;
 
 	exit_mm_release(current, mm);
 	if (!mm)
 		return;
 	sync_mm_rss(mm);
-	/*
-	 * Serialize with any possible pending coredump.
-	 * We must hold mmap_lock around checking core_state
-	 * and clearing tsk->mm.  The core-inducing thread
-	 * will increment ->nr_threads for each thread in the
-	 * group with ->mm != NULL.
-	 */
 	mmap_read_lock(mm);
-	core_state = mm->core_state;
-	if (core_state) {
-		struct core_thread self;
-
-		mmap_read_unlock(mm);
-
-		self.task = current;
-		if (self.task->flags & PF_SIGNALED)
-			self.next = xchg(&core_state->dumper.next, &self);
-		else
-			self.task = NULL;
-		/*
-		 * Implies mb(), the result of xchg() must be visible
-		 * to core_state->dumper.
-		 */
-		if (atomic_dec_and_test(&core_state->nr_threads))
-			complete(&core_state->startup);
-
-		for (;;) {
-			set_current_state(TASK_UNINTERRUPTIBLE);
-			if (!self.task) /* see coredump_finish() */
-				break;
-			freezable_schedule();
-		}
-		__set_current_state(TASK_RUNNING);
-		mmap_read_lock(mm);
-	}
+	coredump_exit_mm(mm);
 	mmgrab(mm);
 	BUG_ON(mm != current->active_mm);
 	/* more a memory barrier than a real lock */
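The handshake that coredump_exit_mm() preserves is a "last one out signals
the waiter" pattern: the core-inducing thread has already counted the
participating threads into core_state->nr_threads, each exiting thread
decrements that count, and the final decrement completes the dumper's
startup completion. A minimal userspace analogue of that counting handshake,
using POSIX threads instead of the kernel's atomic_t/completion primitives
(all names in this sketch are illustrative, not taken from the kernel):

	#include <pthread.h>
	#include <stdatomic.h>
	#include <stdio.h>

	/* Illustrative stand-in for core_state: a count of live threads plus
	 * a condition the "dumper" waits on until every thread checks in. */
	struct dump_state {
		atomic_int nr_threads;
		pthread_mutex_t lock;
		pthread_cond_t startup;	/* signaled by the last thread out */
	};

	static struct dump_state state = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.startup = PTHREAD_COND_INITIALIZER,
	};

	/* Each exiting thread decrements the count; the last one out wakes
	 * the dumper, mirroring atomic_dec_and_test() + complete() above. */
	static void *worker(void *arg)
	{
		(void)arg;
		if (atomic_fetch_sub(&state.nr_threads, 1) == 1) {
			pthread_mutex_lock(&state.lock);
			pthread_cond_signal(&state.startup);
			pthread_mutex_unlock(&state.lock);
		}
		return NULL;
	}

	int main(void)
	{
		enum { N = 4 };
		pthread_t tid[N];

		atomic_store(&state.nr_threads, N);
		for (int i = 0; i < N; i++)
			pthread_create(&tid[i], NULL, worker, NULL);

		/* The "dumper": sleep until every worker has checked in. */
		pthread_mutex_lock(&state.lock);
		while (atomic_load(&state.nr_threads) != 0)
			pthread_cond_wait(&state.startup, &state.lock);
		pthread_mutex_unlock(&state.lock);
		printf("all threads quiesced; safe to dump\n");

		for (int i = 0; i < N; i++)
			pthread_join(tid[i], NULL);
		return 0;
	}

The kernel version adds a second half this sketch omits: each thread also
links itself onto core_state->dumper via xchg() and sleeps until
coredump_finish() clears self.task, so the dumper can wake every waiter
once the dump has been written.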