| field | value | date |
|---|---|---|
| author | Dave Airlie <airlied@redhat.com> | 2019-11-14 05:53:10 +1000 |
| committer | Dave Airlie <airlied@redhat.com> | 2019-11-14 05:53:10 +1000 |
| commit | 77e0723bd27f830d0903225372aa778fe2975648 (patch) | |
| tree | 4c035783e014b3a0ac9174390f88dc75150533e4 /kernel/sched/rt.c | |
| parent | 3ca3a9eab7085b3c938b5d088c3020269cfecdc8 (diff) | |
| parent | 31f4f5b495a62c9a8b15b1c3581acd5efeb9af8c (diff) | |
Merge v5.4-rc7 into drm-next
We have the i915 security fixes to backmerge, but first
let's clear the decks for other drivers to avoid a bigger
mess.
Signed-off-by: Dave Airlie <airlied@redhat.com>
Diffstat (limited to 'kernel/sched/rt.c')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | kernel/sched/rt.c | 37 |

1 file changed, 19 insertions(+), 18 deletions(-)
```diff
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index ebaa4e619684..9b8adc01be3d 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -1469,6 +1469,22 @@ static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
 	resched_curr(rq);
 }
 
+static int balance_rt(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
+{
+	if (!on_rt_rq(&p->rt) && need_pull_rt_task(rq, p)) {
+		/*
+		 * This is OK, because current is on_cpu, which avoids it being
+		 * picked for load-balance and preemption/IRQs are still
+		 * disabled avoiding further scheduler activity on it and we've
+		 * not yet started the picking loop.
+		 */
+		rq_unpin_lock(rq, rf);
+		pull_rt_task(rq);
+		rq_repin_lock(rq, rf);
+	}
+
+	return sched_stop_runnable(rq) || sched_dl_runnable(rq) || sched_rt_runnable(rq);
+}
 #endif /* CONFIG_SMP */
 
 /*
@@ -1552,21 +1568,18 @@ static struct task_struct *
 pick_next_task_rt(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 {
 	struct task_struct *p;
-	struct rt_rq *rt_rq = &rq->rt;
 
 	WARN_ON_ONCE(prev || rf);
 
-	if (!rt_rq->rt_queued)
+	if (!sched_rt_runnable(rq))
 		return NULL;
 
 	p = _pick_next_task_rt(rq);
-
 	set_next_task_rt(rq, p);
-
 	return p;
 }
 
-static void put_prev_task_rt(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
+static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
 {
 	update_curr_rt(rq);
 
@@ -1578,18 +1591,6 @@ static void put_prev_task_rt(struct rq *rq, struct task_struct *p, struct rq_fla
 	 */
 	if (on_rt_rq(&p->rt) && p->nr_cpus_allowed > 1)
 		enqueue_pushable_task(rq, p);
-
-	if (rf && !on_rt_rq(&p->rt) && need_pull_rt_task(rq, p)) {
-		/*
-		 * This is OK, because current is on_cpu, which avoids it being
-		 * picked for load-balance and preemption/IRQs are still
-		 * disabled avoiding further scheduler activity on it and we've
-		 * not yet started the picking loop.
-		 */
-		rq_unpin_lock(rq, rf);
-		pull_rt_task(rq);
-		rq_repin_lock(rq, rf);
-	}
 }
 
 #ifdef CONFIG_SMP
@@ -2366,8 +2367,8 @@ const struct sched_class rt_sched_class = {
 	.set_next_task          = set_next_task_rt,
 
 #ifdef CONFIG_SMP
+	.balance		= balance_rt,
 	.select_task_rq		= select_task_rq_rt,
-
 	.set_cpus_allowed       = set_cpus_allowed_common,
 	.rq_online              = rq_online_rt,
 	.rq_offline             = rq_offline_rt,
```
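
For readers skimming the merge, the substantive change in this file is that the RT pull logic moves out of put_prev_task_rt() and into a new balance_rt() hook, wired up through the .balance member of struct sched_class. The caller side is not part of this diff, so the following stand-alone C sketch uses hypothetical stand-in types and names (the simplified struct rq, balance_rt_demo, rt_class_demo — none of these are kernel code) to illustrate the callback pattern: the core gives a class a chance to pull work from other runqueues before committing to a pick.

```c
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for the kernel types; illustration only. */
struct rq {
	int nr_rt_running;	/* runnable RT tasks on this runqueue */
};

struct sched_class {
	/*
	 * Called before the core picks a task: pull work from other
	 * runqueues if needed, and report whether this class (or a
	 * higher-priority one) now has something runnable.
	 */
	bool (*balance)(struct rq *rq);
};

static bool balance_rt_demo(struct rq *rq)
{
	if (rq->nr_rt_running == 0) {
		/* Stand-in for pull_rt_task(): steal a queued RT task
		 * from a busier runqueue. */
		rq->nr_rt_running = 1;
		printf("pulled an RT task\n");
	}
	return rq->nr_rt_running > 0;
}

static const struct sched_class rt_class_demo = {
	.balance = balance_rt_demo,
};

int main(void)
{
	struct rq rq = { .nr_rt_running = 0 };

	/* Core-scheduler side: let the class balance before the pick. */
	if (rt_class_demo.balance(&rq))
		printf("picking next task from the RT class\n");

	return 0;
}
```

The boolean return mirrors the kernel version's sched_stop_runnable(rq) || sched_dl_runnable(rq) || sched_rt_runnable(rq): it tells the core whether a task of this class or a higher-priority one is now runnable, so the pick loop knows whether it needs to keep descending to lower classes.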
