Diffstat (limited to 'kernel/sched/rt.c')
-rw-r--r--	kernel/sched/rt.c	53
1 files changed, 32 insertions, 21 deletions
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index c5565c3c515f..573e1ca01102 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -274,13 +274,16 @@ static void update_rt_migration(struct rt_rq *rt_rq)
 
 static void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
 {
+	struct task_struct *p;
+
 	if (!rt_entity_is_task(rt_se))
 		return;
 
+	p = rt_task_of(rt_se);
 	rt_rq = &rq_of_rt_rq(rt_rq)->rt;
 
 	rt_rq->rt_nr_total++;
-	if (rt_se->nr_cpus_allowed > 1)
+	if (p->nr_cpus_allowed > 1)
 		rt_rq->rt_nr_migratory++;
 
 	update_rt_migration(rt_rq);
@@ -288,13 +291,16 @@ static void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
 
 static void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
 {
+	struct task_struct *p;
+
 	if (!rt_entity_is_task(rt_se))
 		return;
 
+	p = rt_task_of(rt_se);
 	rt_rq = &rq_of_rt_rq(rt_rq)->rt;
 
 	rt_rq->rt_nr_total--;
-	if (rt_se->nr_cpus_allowed > 1)
+	if (p->nr_cpus_allowed > 1)
 		rt_rq->rt_nr_migratory--;
 
 	update_rt_migration(rt_rq);
@@ -1161,7 +1167,7 @@ enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags)
 
 	enqueue_rt_entity(rt_se, flags & ENQUEUE_HEAD);
 
-	if (!task_current(rq, p) && p->rt.nr_cpus_allowed > 1)
+	if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
 		enqueue_pushable_task(rq, p);
 
 	inc_nr_running(rq);
@@ -1225,7 +1231,7 @@ select_task_rq_rt(struct task_struct *p, int sd_flag, int flags)
 
 	cpu = task_cpu(p);
 
-	if (p->rt.nr_cpus_allowed == 1)
+	if (p->nr_cpus_allowed == 1)
 		goto out;
 
 	/* For anything but wake ups, just return the task_cpu */
@@ -1260,9 +1266,9 @@ select_task_rq_rt(struct task_struct *p, int sd_flag, int flags)
 	 * will have to sort it out.
 	 */
 	if (curr && unlikely(rt_task(curr)) &&
-	    (curr->rt.nr_cpus_allowed < 2 ||
+	    (curr->nr_cpus_allowed < 2 ||
 	     curr->prio <= p->prio) &&
-	    (p->rt.nr_cpus_allowed > 1)) {
+	    (p->nr_cpus_allowed > 1)) {
 		int target = find_lowest_rq(p);
 
 		if (target != -1)
@@ -1276,10 +1282,10 @@ out:
 
 static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
 {
-	if (rq->curr->rt.nr_cpus_allowed == 1)
+	if (rq->curr->nr_cpus_allowed == 1)
 		return;
 
-	if (p->rt.nr_cpus_allowed != 1
+	if (p->nr_cpus_allowed != 1
 	    && cpupri_find(&rq->rd->cpupri, p, NULL))
 		return;
 
@@ -1395,7 +1401,7 @@ static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
 	 * The previous task needs to be made eligible for pushing
 	 * if it is still active
 	 */
-	if (on_rt_rq(&p->rt) && p->rt.nr_cpus_allowed > 1)
+	if (on_rt_rq(&p->rt) && p->nr_cpus_allowed > 1)
 		enqueue_pushable_task(rq, p);
 }
 
@@ -1408,7 +1414,7 @@ static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
 {
 	if (!task_running(rq, p) &&
 	    (cpu < 0 || cpumask_test_cpu(cpu, tsk_cpus_allowed(p))) &&
-	    (p->rt.nr_cpus_allowed > 1))
+	    (p->nr_cpus_allowed > 1))
 		return 1;
 	return 0;
 }
@@ -1464,7 +1470,7 @@ static int find_lowest_rq(struct task_struct *task)
 	if (unlikely(!lowest_mask))
 		return -1;
 
-	if (task->rt.nr_cpus_allowed == 1)
+	if (task->nr_cpus_allowed == 1)
 		return -1; /* No other targets possible */
 
 	if (!cpupri_find(&task_rq(task)->rd->cpupri, task, lowest_mask))
@@ -1556,7 +1562,7 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
 				     task_running(rq, task) ||
 				     !task->on_rq)) {
 
-				raw_spin_unlock(&lowest_rq->lock);
+				double_unlock_balance(rq, lowest_rq);
 				lowest_rq = NULL;
 				break;
 			}
@@ -1586,7 +1592,7 @@ static struct task_struct *pick_next_pushable_task(struct rq *rq)
 
 	BUG_ON(rq->cpu != task_cpu(p));
 	BUG_ON(task_current(rq, p));
-	BUG_ON(p->rt.nr_cpus_allowed <= 1);
+	BUG_ON(p->nr_cpus_allowed <= 1);
 
 	BUG_ON(!p->on_rq);
 	BUG_ON(!rt_task(p));
@@ -1793,9 +1799,9 @@ static void task_woken_rt(struct rq *rq, struct task_struct *p)
 	if (!task_running(rq, p) &&
 	    !test_tsk_need_resched(rq->curr) &&
 	    has_pushable_tasks(rq) &&
-	    p->rt.nr_cpus_allowed > 1 &&
+	    p->nr_cpus_allowed > 1 &&
 	    rt_task(rq->curr) &&
-	    (rq->curr->rt.nr_cpus_allowed < 2 ||
+	    (rq->curr->nr_cpus_allowed < 2 ||
 	     rq->curr->prio <= p->prio))
 		push_rt_tasks(rq);
 }
@@ -1817,7 +1823,7 @@ static void set_cpus_allowed_rt(struct task_struct *p,
 	 * Only update if the process changes its state from whether it
 	 * can migrate or not.
 	 */
-	if ((p->rt.nr_cpus_allowed > 1) == (weight > 1))
+	if ((p->nr_cpus_allowed > 1) == (weight > 1))
 		return;
 
 	rq = task_rq(p);
@@ -1979,6 +1985,8 @@ static void watchdog(struct rq *rq, struct task_struct *p)
 
 static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
 {
+	struct sched_rt_entity *rt_se = &p->rt;
+
 	update_curr_rt(rq);
 
 	watchdog(rq, p);
@@ -1996,12 +2004,15 @@ static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
 	p->rt.time_slice = RR_TIMESLICE;
 
 	/*
-	 * Requeue to the end of queue if we are not the only element
-	 * on the queue:
+	 * Requeue to the end of queue if we (and all of our ancestors) are the
+	 * only element on the queue
 	 */
-	if (p->rt.run_list.prev != p->rt.run_list.next) {
-		requeue_task_rt(rq, p, 0);
-		set_tsk_need_resched(p);
+	for_each_sched_rt_entity(rt_se) {
+		if (rt_se->run_list.prev != rt_se->run_list.next) {
+			requeue_task_rt(rq, p, 0);
+			set_tsk_need_resched(p);
+			return;
+		}
 	}
 }
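Most of the hunks above apply one mechanical substitution: the CPU-affinity count is read as p->nr_cpus_allowed on struct task_struct rather than p->rt.nr_cpus_allowed on the RT scheduling entity. The recurring test is whether a task may run on more than one CPU, which is what makes it a push/pull candidate. Below is a standalone sketch of that test, not kernel code; the struct and field names are simplified stand-ins for struct task_struct.

/* Sketch only: models the "nr_cpus_allowed > 1" check repeated in the diff. */
#include <stdbool.h>
#include <stdio.h>

struct task {
	int prio;
	int nr_cpus_allowed;	/* how many CPUs the affinity mask permits */
};

/* A task is considered for migration only if its affinity allows >1 CPU. */
static bool task_is_migratable(const struct task *p)
{
	return p->nr_cpus_allowed > 1;
}

int main(void)
{
	struct task pinned  = { .prio = 10, .nr_cpus_allowed = 1 };
	struct task roaming = { .prio = 10, .nr_cpus_allowed = 4 };

	printf("pinned migratable:  %d\n", task_is_migratable(&pinned));	/* 0 */
	printf("roaming migratable: %d\n", task_is_migratable(&roaming));	/* 1 */
	return 0;
}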
