Diffstat (limited to 'kernel/fork.c')
-rw-r--r--  kernel/fork.c  43
1 file changed, 20 insertions(+), 23 deletions(-)
diff --git a/kernel/fork.c b/kernel/fork.c
index a17621c6cd42..086fe73ad6bd 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -532,12 +532,11 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p)
mm->flags = (current->mm) ?
(current->mm->flags & MMF_INIT_MASK) : default_dump_filter;
mm->core_state = NULL;
- atomic_long_set(&mm->nr_ptes, 0);
+ mm->nr_ptes = 0;
memset(&mm->rss_stat, 0, sizeof(mm->rss_stat));
spin_lock_init(&mm->page_table_lock);
mm_init_aio(mm);
mm_init_owner(mm, p);
- clear_tlb_flush_pending(mm);
if (likely(!mm_alloc_pgd(mm))) {
mm->def_flags = 0;
@@ -561,7 +560,7 @@ static void check_mm(struct mm_struct *mm)
"mm:%p idx:%d val:%ld\n", mm, i, x);
}
-#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
VM_BUG_ON(mm->pmd_huge_pte);
#endif
}
@@ -800,11 +799,14 @@ void mm_release(struct task_struct *tsk, struct mm_struct *mm)
* Allocate a new mm structure and copy contents from the
* mm structure of the passed in task structure.
*/
-static struct mm_struct *dup_mm(struct task_struct *tsk)
+struct mm_struct *dup_mm(struct task_struct *tsk)
{
struct mm_struct *mm, *oldmm = current->mm;
int err;
+ if (!oldmm)
+ return NULL;
+
mm = allocate_mm();
if (!mm)
goto fail_nomem;
@@ -812,9 +814,12 @@ static struct mm_struct *dup_mm(struct task_struct *tsk)
memcpy(mm, oldmm, sizeof(*mm));
mm_init_cpumask(mm);
-#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
mm->pmd_huge_pte = NULL;
#endif
+#ifdef CONFIG_NUMA_BALANCING
+ mm->first_nid = NUMA_PTE_SCAN_INIT;
+#endif
if (!mm_init(mm, tsk))
goto fail_nomem;
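The two hunks above cover dup_mm(): allocate a fresh mm_struct, byte-copy the parent's, then reinitialise it; the added !oldmm check simply bails out for callers that have no mm, such as kernel threads, and the remainder of the function (not shown in these hunks) copies the VMAs. Seen from user space, this is what gives a forked child a private copy of the parent's address space. A minimal illustration of that effect, with error handling omitted:

#include <stdio.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
        int value = 1;
        pid_t pid = fork();

        if (pid == 0) {         /* child: writes land in its own copy (COW) */
                value = 2;
                _exit(0);
        }
        waitpid(pid, NULL, 0);
        printf("parent still sees value = %d\n", value);   /* prints 1 */
        return 0;
}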
@@ -1032,11 +1037,6 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
sig->nr_threads = 1;
atomic_set(&sig->live, 1);
atomic_set(&sig->sigcnt, 1);
-
- /* list_add(thread_node, thread_head) without INIT_LIST_HEAD() */
- sig->thread_head = (struct list_head)LIST_HEAD_INIT(tsk->thread_node);
- tsk->thread_node = (struct list_head)LIST_HEAD_INIT(sig->thread_head);
-
init_waitqueue_head(&sig->wait_chldexit);
sig->curr_target = tsk;
init_sigpending(&sig->shared_pending);
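The comment being removed in this hunk refers to an initialisation idiom worth spelling out: assigning each list_head a LIST_HEAD_INIT() of the *other* head links the two nodes directly into a ring, which is the same state that INIT_LIST_HEAD() followed by list_add() would produce. A stand-alone illustration with stripped-down types (not the kernel's list.h):

#include <stdio.h>

struct list_head { struct list_head *next, *prev; };
#define LIST_HEAD_INIT(name) { &(name), &(name) }

static struct { struct list_head thread_head; } sig;
static struct { struct list_head thread_node; } tsk;

int main(void)
{
        /* Equivalent to INIT_LIST_HEAD(&sig.thread_head) followed by
         * list_add(&tsk.thread_node, &sig.thread_head): each head is
         * initialised to point at the other, forming a two-entry ring. */
        sig.thread_head = (struct list_head)LIST_HEAD_INIT(tsk.thread_node);
        tsk.thread_node = (struct list_head)LIST_HEAD_INIT(sig.thread_head);

        printf("%d\n", sig.thread_head.next == &tsk.thread_node &&
                       tsk.thread_node.next == &sig.thread_head);   /* 1 */
        return 0;
}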
@@ -1089,10 +1089,8 @@ static void rt_mutex_init_task(struct task_struct *p)
{
raw_spin_lock_init(&p->pi_lock);
#ifdef CONFIG_RT_MUTEXES
- p->pi_waiters = RB_ROOT;
- p->pi_waiters_leftmost = NULL;
+ plist_head_init(&p->pi_waiters);
p->pi_blocked_on = NULL;
- p->pi_top_task = NULL;
#endif
}
@@ -1176,7 +1174,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
* do not allow it to share a thread group or signal handlers or
* parent with the forking task.
*/
- if (clone_flags & CLONE_SIGHAND) {
+ if (clone_flags & (CLONE_SIGHAND | CLONE_PARENT)) {
if ((clone_flags & (CLONE_NEWUSER | CLONE_NEWPID)) ||
(task_active_pid_ns(current) !=
current->nsproxy->pid_ns_for_children))
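The comment in this hunk is the reason a clone() that combines signal-handler sharing with a request for a new pid or user namespace is refused outright. A user-space sketch of the refusal, assuming a Linux/glibc build environment and a downward-growing stack; the call below should come back with EINVAL from the check above rather than reaching namespace creation:

#define _GNU_SOURCE
#include <errno.h>
#include <sched.h>
#include <signal.h>
#include <stdio.h>
#include <string.h>

static char child_stack[64 * 1024];

static int child_fn(void *arg)
{
        return 0;               /* never reached: the clone() is rejected */
}

int main(void)
{
        /* Sharing signal handlers (CLONE_SIGHAND, which also requires
         * CLONE_VM) while asking for a new pid namespace trips the check. */
        int flags = CLONE_SIGHAND | CLONE_VM | CLONE_NEWPID | SIGCHLD;

        if (clone(child_fn, child_stack + sizeof(child_stack), flags, NULL) == -1)
                printf("clone failed: %s (EINVAL expected)\n", strerror(errno));
        return 0;
}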
@@ -1226,6 +1224,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
if (!try_module_get(task_thread_info(p)->exec_domain->module))
goto bad_fork_cleanup_count;
+ p->did_exec = 0;
delayacct_tsk_init(p); /* Must remain after dup_task_struct() */
copy_flags(clone_flags, p);
INIT_LIST_HEAD(&p->children);
@@ -1314,9 +1313,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
#endif
/* Perform scheduler related setup. Assign this task to a CPU. */
- retval = sched_fork(clone_flags, p);
- if (retval)
- goto bad_fork_cleanup_policy;
+ sched_fork(p);
retval = perf_event_init_task(p);
if (retval)
@@ -1376,6 +1373,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
INIT_LIST_HEAD(&p->pi_state_list);
p->pi_state_cache = NULL;
#endif
+ uprobe_copy_process(p);
/*
* sigaltstack should be cleared when sharing the same VM
*/
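The sigaltstack comment above has a directly observable effect: a child created with CLONE_VM starts with its alternate signal stack disabled even though the parent installed one. A small user-space check, assuming Linux/glibc and a downward-growing stack, with error handling trimmed:

#define _GNU_SOURCE
#include <sched.h>
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <sys/wait.h>
#include <unistd.h>

static char altstack[8192];

static int child_fn(void *arg)
{
        stack_t ss;
        const char *msg;

        sigaltstack(NULL, &ss);         /* query the child's altstack */
        msg = (ss.ss_flags & SS_DISABLE) ?
                "child altstack: SS_DISABLE\n" : "child altstack: enabled\n";
        write(STDOUT_FILENO, msg, strlen(msg));
        return 0;
}

int main(void)
{
        static char cstack[64 * 1024];
        stack_t ss = { .ss_sp = altstack, .ss_size = sizeof(altstack) };
        pid_t pid;

        sigaltstack(&ss, NULL);         /* parent installs an altstack */

        pid = clone(child_fn, cstack + sizeof(cstack), CLONE_VM | SIGCHLD, NULL);
        if (pid == -1) {
                perror("clone");
                return 1;
        }
        waitpid(pid, NULL, 0);
        return 0;
}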
@@ -1408,11 +1406,13 @@ static struct task_struct *copy_process(unsigned long clone_flags,
p->tgid = p->pid;
}
+ p->pdeath_signal = 0;
+ p->exit_state = 0;
+
p->nr_dirtied = 0;
p->nr_dirtied_pause = 128 >> (PAGE_SHIFT - 10);
p->dirty_paused_when = 0;
- p->pdeath_signal = 0;
INIT_LIST_HEAD(&p->thread_group);
p->task_works = NULL;
@@ -1475,8 +1475,6 @@ static struct task_struct *copy_process(unsigned long clone_flags,
atomic_inc(&current->signal->sigcnt);
list_add_tail_rcu(&p->thread_group,
&p->group_leader->thread_group);
- list_add_tail_rcu(&p->thread_node,
- &p->signal->thread_head);
}
attach_pid(p, PIDTYPE_PID);
nr_threads++;
@@ -1492,7 +1490,6 @@ static struct task_struct *copy_process(unsigned long clone_flags,
perf_event_fork(p);
trace_task_newtask(p, clone_flags);
- uprobe_copy_process(p, clone_flags);
return p;
@@ -1650,7 +1647,7 @@ SYSCALL_DEFINE0(fork)
return do_fork(SIGCHLD, 0, 0, NULL, NULL);
#else
/* can not support in nommu mode */
- return -EINVAL;
+ return(-EINVAL);
#endif
}
#endif
@@ -1658,7 +1655,7 @@ SYSCALL_DEFINE0(fork)
#ifdef __ARCH_WANT_SYS_VFORK
SYSCALL_DEFINE0(vfork)
{
- return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, 0,
+ return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, 0,
0, NULL, NULL);
}
#endif
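For orientation, the vfork wrapper above passes CLONE_VFORK | CLONE_VM, which produces the familiar user-visible ordering: the child runs first, borrowing the parent's address space, and the parent is suspended until the child exits or execs. A minimal user-space illustration (not kernel code):

#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
        pid_t pid = vfork();

        if (pid == 0) {
                /* Child: runs first in the parent's address space; only
                 * async-signal-safe work is allowed before _exit()/exec. */
                write(STDOUT_FILENO, "child\n", 6);
                _exit(0);
        }
        /* Parent: resumes only after the child has exited. */
        write(STDOUT_FILENO, "parent\n", 7);
        waitpid(pid, NULL, 0);
        return 0;
}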