author    Ingo Molnar <mingo@kernel.org>  2016-05-12 09:18:13 +0200
committer Ingo Molnar <mingo@kernel.org>  2016-05-12 09:18:13 +0200
commit    eb60b3e5e8dfdd590e586a6fc22daf2f63a7b7e6 (patch)
tree      1b06e2c1beca8f970685eb13096c7a12480526c6 /kernel/sched
parent    58fe9c4621b7219e724c0b7af053112f974a08c3 (diff)
parent    53d3bc773eaa7ab1cf63585e76af7ee869d5e709 (diff)
Merge branch 'sched/urgent' into sched/core to pick up fixes
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel/sched')
-rw-r--r--  kernel/sched/deadline.c |  1
-rw-r--r--  kernel/sched/fair.c     | 29
-rw-r--r--  kernel/sched/rt.c       |  1
3 files changed, 16 insertions(+), 15 deletions(-)
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index ba53a87bb978..0ac6c84f3371 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -1395,6 +1395,7 @@ static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq)
 				     !cpumask_test_cpu(later_rq->cpu,
 						       &task->cpus_allowed) ||
 				     task_running(rq, task) ||
+				     !dl_task(task) ||
 				     !task_on_rq_queued(task))) {
 				double_unlock_balance(rq, later_rq);
 				later_rq = NULL;
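
Why the extra test is needed: find_lock_later_rq() acquires the second
runqueue lock through double_lock_balance(), which may drop rq->lock in
order to take both locks in the proper order. In that window another
thread can move the task out of SCHED_DEADLINE entirely (for example via
sched_setattr()), so the scheduling class must be revalidated along with
the other conditions. A rough sketch of the pattern, paraphrased rather
than quoted from kernel/sched/deadline.c:

	if (double_lock_balance(rq, later_rq)) {
		/*
		 * rq->lock was dropped and retaken, so every earlier
		 * check is stale; in particular the task may no longer
		 * be a deadline task, hence the new !dl_task() test.
		 */
		if (unlikely(task_rq(task) != rq || !dl_task(task))) {
			double_unlock_balance(rq, later_rq);
			later_rq = NULL;
			break;
		}
	}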
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 51f7a4b62985..39fde3660f97 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3099,7 +3099,14 @@ static int idle_balance(struct rq *this_rq);
 #else /* CONFIG_SMP */
-static inline void update_load_avg(struct sched_entity *se, int update_tg) {}
+static inline void update_load_avg(struct sched_entity *se, int not_used)
+{
+	struct cfs_rq *cfs_rq = cfs_rq_of(se);
+	struct rq *rq = rq_of(cfs_rq);
+
+	cpufreq_trigger_update(rq_clock(rq));
+}
+
 static inline void
 enqueue_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {}
 static inline void
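
Why the UP stub changed: with !CONFIG_SMP the per-entity load tracking
that normally drives cpufreq updates is compiled out, so the previous
empty stub meant uniprocessor kernels never triggered a cpufreq
re-evaluation from the fair-class paths. The replacement forwards the
runqueue clock to the cpufreq hook instead. For reference, in kernels of
this vintage cpufreq_trigger_update() is itself only a thin wrapper
(recalled from kernel/sched/cpufreq.c, so treat the exact shape as an
approximation):

	/* ULONG_MAX as the utilization value tells the governor to
	 * re-evaluate the frequency without a meaningful number. */
	void cpufreq_trigger_update(u64 time)
	{
		cpufreq_update_util(time, ULONG_MAX, 0);
	}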
@@ -3250,25 +3257,17 @@ static inline void check_schedstat_required(void)
 static void
 enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 {
-	bool renorm = !(flags & ENQUEUE_WAKEUP) || (flags & ENQUEUE_WAKING);
-	bool curr = cfs_rq->curr == se;
-
 	/*
-	 * If we're the current task, we must renormalise before calling
-	 * update_curr().
+	 * Update the normalized vruntime before updating min_vruntime
+	 * through calling update_curr().
 	 */
-	if (renorm && curr)
+	if (!(flags & ENQUEUE_WAKEUP) || (flags & ENQUEUE_WAKING))
 		se->vruntime += cfs_rq->min_vruntime;
 
-	update_curr(cfs_rq);
-
 	/*
-	 * Otherwise, renormalise after, such that we're placed at the current
-	 * moment in time, instead of some random moment in the past.
+	 * Update run-time statistics of the 'current'.
 	 */
-	if (renorm && !curr)
-		se->vruntime += cfs_rq->min_vruntime;
-
+	update_curr(cfs_rq);
 	enqueue_entity_load_avg(cfs_rq, se);
 	account_entity_enqueue(cfs_rq, se);
 	update_cfs_shares(cfs_rq);
@@ -3284,7 +3283,7 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 		update_stats_enqueue(cfs_rq, se);
 		check_spread(cfs_rq, se);
 	}
-	if (!curr)
+	if (se != cfs_rq->curr)
 		__enqueue_entity(cfs_rq, se);
 	se->on_rq = 1;
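
What the revert restores: se->vruntime is kept relative to its
cfs_rq->min_vruntime while an entity is off the queue, and is rebased on
the destination queue's min_vruntime at enqueue time, before update_curr()
can move min_vruntime; the wakeup paths that place the entity explicitly
(ENQUEUE_WAKEUP without ENQUEUE_WAKING) skip the rebase. A toy
illustration of the rebasing arithmetic, with made-up numbers in plain
userspace C rather than kernel code:

	#include <stdio.h>

	int main(void)
	{
		/* invented example values; u64 in the kernel */
		unsigned long long vruntime = 1005000; /* absolute on old cfs_rq */
		unsigned long long old_min  = 1000000; /* old cfs_rq->min_vruntime */
		unsigned long long new_min  = 2000000; /* new cfs_rq->min_vruntime */

		/* dequeue/migration keeps only the entity's lag */
		unsigned long long lag = vruntime - old_min;   /* 5000 */

		/* enqueue rebases that lag onto the new baseline */
		unsigned long long rebased = lag + new_min;    /* 2005000 */

		printf("lag=%llu rebased=%llu\n", lag, rebased);
		return 0;
	}

The lag (5000 here) is preserved across the move, so the task competes
fairly on the new queue instead of starting wildly ahead of or behind
its min_vruntime.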
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 68deaf901a12..67afa06cc8bc 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -1729,6 +1729,7 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
 				     !cpumask_test_cpu(lowest_rq->cpu,
 						       tsk_cpus_allowed(task)) ||
 				     task_running(rq, task) ||
+				     !rt_task(task) ||
 				     !task_on_rq_queued(task))) {
 				double_unlock_balance(rq, lowest_rq);
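
The rt.c change is the exact mirror of the deadline.c one:
find_lock_lowest_rq() must recheck, after double_lock_balance() has
potentially dropped rq->lock, that the task still belongs to the RT
class before pushing it. Both class tests are cheap priority-range
checks, so the extra condition costs next to nothing. Roughly, as
recalled from include/linux/sched.h of this era (an approximation, not
a verbatim quote):

	/* deadline tasks use negative priorities (prio < MAX_DL_PRIO,
	 * where MAX_DL_PRIO is 0); RT tasks use 0..MAX_RT_PRIO-1 */
	static inline int dl_task(struct task_struct *p)
	{
		return dl_prio(p->prio);
	}

	static inline int rt_task(struct task_struct *p)
	{
		return rt_prio(p->prio);
	}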