author     Bharata B Rao <bharata@linux.vnet.ibm.com>    2008-08-28 14:42:49 +0530
committer  Ingo Molnar <mingo@elte.hu>                   2008-08-28 11:35:51 +0200
commit     aec0a5142cb52aaa152d962d84a838e25d520742 (patch)
tree       8cf21e8fd5c5bc513923fd38acfc562ae0ab39d1 /kernel/sched_fair.c
parent     65eb3dc609dec17deea48dcd4de2e549d29a9824 (diff)
sched: call resched_task() conditionally from new task wake up path
- During wake up of a new task, task_new_fair() can do a resched_task() on the
  current task. Later in the code path, check_preempt_curr() also ends up doing
  the same, which can be avoided. Check if TIF_NEED_RESCHED is already set for
  the current task.
- task_new_fair() does a resched_task() on the current task unconditionally.
  This is needed only in the case where the child runs before the parent, so
  this is a small speedup.

Signed-off-by: Bharata B Rao <bharata@linux.vnet.ibm.com>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
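To make the intent of the two changes concrete, here is a minimal userspace
sketch in plain C (not kernel code) of the same pattern: skip a redundant
reschedule request when the need-resched flag is already set, and only request
one on new-task wake up when the child is meant to run before the parent. All
names in the sketch (struct task, need_resched, request_resched, preempt_check,
wake_new_task, child_runs_first) are hypothetical stand-ins; the real kernel
helpers involved are test_tsk_need_resched() and resched_task().

/*
 * Illustrative sketch only; the names below are made up for this example
 * and do not exist in the kernel.
 */
#include <stdbool.h>
#include <stdio.h>

struct task {
        bool need_resched;      /* analogue of TIF_NEED_RESCHED */
};

static int resched_requests;    /* how many times a resched was requested */

static void request_resched(struct task *t)
{
        t->need_resched = true;
        resched_requests++;
}

/*
 * Analogue of check_preempt_wakeup(): bail out early if a reschedule is
 * already pending, so the work done on the wake-up path is not repeated.
 */
static void preempt_check(struct task *curr)
{
        if (curr->need_resched)
                return;         /* already marked; nothing more to do */
        /* ... preemption decision would go here ... */
        request_resched(curr);
}

/*
 * Analogue of task_new_fair(): only ask the parent to reschedule when the
 * child is supposed to run before it, instead of unconditionally.
 */
static void wake_new_task(struct task *parent, bool child_runs_first)
{
        if (child_runs_first)
                request_resched(parent);
        /* the generic wake-up path ends with a preemption check */
        preempt_check(parent);
}

int main(void)
{
        struct task parent = { .need_resched = false };

        wake_new_task(&parent, true);
        printf("resched requests: %d\n", resched_requests);    /* prints 1 */
        return 0;
}

With the flag check in place, one wake up produces a single reschedule request
instead of two, which is the avoidable duplicate work the commit message
describes.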
Diffstat (limited to 'kernel/sched_fair.c')
-rw-r--r--   kernel/sched_fair.c   9
1 file changed, 8 insertions(+), 1 deletion(-)
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index fb8994c6d4bb..8264bb5dbd51 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1348,6 +1348,13 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p)
 	if (unlikely(se == pse))
 		return;
 
+	/*
+	 * We can come here with TIF_NEED_RESCHED already set from new task
+	 * wake up path.
+	 */
+	if (test_tsk_need_resched(curr))
+		return;
+
 	cfs_rq_of(pse)->next = pse;
 
 	/*
@@ -1620,10 +1627,10 @@ static void task_new_fair(struct rq *rq, struct task_struct *p)
 		 * 'current' within the tree based on its new key value.
 		 */
 		swap(curr->vruntime, se->vruntime);
+		resched_task(rq->curr);
 	}
 
 	enqueue_task_fair(rq, p, 0);
-	resched_task(rq->curr);
 }
 
 /*