author    Peter Zijlstra <peterz@infradead.org>    2015-06-11 14:46:40 +0200
committer Thomas Gleixner <tglx@linutronix.de>     2015-06-19 00:25:26 +0200
commit    8046d6806247088de5725eaf8a2580b29e50ac5a (patch)
tree      08f620cfc69d58effc57115fac4703d06e5ef2ef /kernel/sched
parent    4c9a4bc89a9cca8128bce67d6bc8870d6b7ee0b2 (diff)
sched,rt: Remove return value from pull_rt_task()
In order to be able to use pull_rt_task() from a callback, we need to do
away with the return value. Since the return value indicates whether we
should reschedule, do this inside the function itself. Since not all
callers currently do this, this can increase the number of reschedules
due to rt balancing. Too many reschedules is not a correctness issue,
too few are.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: ktkhai@parallels.com
Cc: rostedt@goodmis.org
Cc: juri.lelli@gmail.com
Cc: pang.xunlei@linaro.org
Cc: oleg@redhat.com
Cc: wanpeng.li@linux.intel.com
Cc: umgwanakikbuti@gmail.com
Link: http://lkml.kernel.org/r/20150611124742.679002000@infradead.org
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
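The resulting calling convention, reduced to its essentials (a minimal
sketch mirroring the diff below, not the full kernel code; the pull loop
body is elided):

	static void pull_rt_task(struct rq *this_rq)
	{
		bool resched = false;

		if (likely(!rt_overloaded(this_rq)))
			return;

		/*
		 * ... try to pull runnable RT tasks from overloaded CPUs,
		 * setting resched = true whenever a task is actually pulled ...
		 */

		if (resched)
			resched_curr(this_rq);
	}

	/* Callers such as switched_from_rt() no longer inspect a return value: */
	static void switched_from_rt(struct rq *rq, struct task_struct *p)
	{
		if (!task_on_rq_queued(p) || rq->rt.rt_nr_running)
			return;

		pull_rt_task(rq);
	}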
Diffstat (limited to 'kernel/sched')
-rw-r--r--   kernel/sched/rt.c   22
1 file changed, 11 insertions(+), 11 deletions(-)
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 4f3726fe1246..c702b48de9f0 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -260,7 +260,7 @@ int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
#ifdef CONFIG_SMP
-static int pull_rt_task(struct rq *this_rq);
+static void pull_rt_task(struct rq *this_rq);
static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev)
{
@@ -415,9 +415,8 @@ static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev)
return false;
}
-static inline int pull_rt_task(struct rq *this_rq)
+static inline void pull_rt_task(struct rq *this_rq)
{
- return 0;
}
static inline void queue_push_tasks(struct rq *rq)
@@ -1955,14 +1954,15 @@ static void push_irq_work_func(struct irq_work *work)
}
#endif /* HAVE_RT_PUSH_IPI */
-static int pull_rt_task(struct rq *this_rq)
+static void pull_rt_task(struct rq *this_rq)
{
- int this_cpu = this_rq->cpu, ret = 0, cpu;
+ int this_cpu = this_rq->cpu, cpu;
+ bool resched = false;
struct task_struct *p;
struct rq *src_rq;
if (likely(!rt_overloaded(this_rq)))
- return 0;
+ return;
/*
* Match the barrier from rt_set_overloaded; this guarantees that if we
@@ -1973,7 +1973,7 @@ static int pull_rt_task(struct rq *this_rq)
#ifdef HAVE_RT_PUSH_IPI
if (sched_feat(RT_PUSH_IPI)) {
tell_cpu_to_push(this_rq);
- return 0;
+ return;
}
#endif
@@ -2026,7 +2026,7 @@ static int pull_rt_task(struct rq *this_rq)
if (p->prio < src_rq->curr->prio)
goto skip;
- ret = 1;
+ resched = true;
deactivate_task(src_rq, p, 0);
set_task_cpu(p, this_cpu);
@@ -2042,7 +2042,8 @@ skip:
double_unlock_balance(this_rq, src_rq);
}
- return ret;
+ if (resched)
+ resched_curr(this_rq);
}
/*
@@ -2138,8 +2139,7 @@ static void switched_from_rt(struct rq *rq, struct task_struct *p)
if (!task_on_rq_queued(p) || rq->rt.rt_nr_running)
return;
- if (pull_rt_task(rq))
- resched_curr(rq);
+ pull_rt_task(rq);
}
void __init init_sched_rt_class(void)