-rw-r--r--  kernel/sched/deadline.c | 30
-rw-r--r--  kernel/sched/sched.h    |  6
2 files changed, 20 insertions, 16 deletions
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index 01f474a5bd14..9ebd0a9241ed 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -2590,11 +2590,12 @@ void sched_dl_do_global(void)
 int sched_dl_overflow(struct task_struct *p, int policy,
 		      const struct sched_attr *attr)
 {
-	struct dl_bw *dl_b = dl_bw_of(task_cpu(p));
 	u64 period = attr->sched_period ?: attr->sched_deadline;
 	u64 runtime = attr->sched_runtime;
 	u64 new_bw = dl_policy(policy) ? to_ratio(period, runtime) : 0;
-	int cpus, err = -1;
+	int cpus, err = -1, cpu = task_cpu(p);
+	struct dl_bw *dl_b = dl_bw_of(cpu);
+	unsigned long cap;
 
 	if (attr->sched_flags & SCHED_FLAG_SUGOV)
 		return 0;
@@ -2609,15 +2610,17 @@ int sched_dl_overflow(struct task_struct *p, int policy,
 	 * allocated bandwidth of the container.
 	 */
 	raw_spin_lock(&dl_b->lock);
-	cpus = dl_bw_cpus(task_cpu(p));
+	cpus = dl_bw_cpus(cpu);
+	cap = dl_bw_capacity(cpu);
+
 	if (dl_policy(policy) && !task_has_dl_policy(p) &&
-	    !__dl_overflow(dl_b, cpus, 0, new_bw)) {
+	    !__dl_overflow(dl_b, cap, 0, new_bw)) {
 		if (hrtimer_active(&p->dl.inactive_timer))
 			__dl_sub(dl_b, p->dl.dl_bw, cpus);
 		__dl_add(dl_b, new_bw, cpus);
 		err = 0;
 	} else if (dl_policy(policy) && task_has_dl_policy(p) &&
-		   !__dl_overflow(dl_b, cpus, p->dl.dl_bw, new_bw)) {
+		   !__dl_overflow(dl_b, cap, p->dl.dl_bw, new_bw)) {
 		/*
 		 * XXX this is slightly incorrect: when the task
 		 * utilization decreases, we should delay the total
@@ -2772,19 +2775,19 @@ bool dl_param_changed(struct task_struct *p, const struct sched_attr *attr)
 #ifdef CONFIG_SMP
 int dl_task_can_attach(struct task_struct *p, const struct cpumask *cs_cpus_allowed)
 {
+	unsigned long flags, cap;
 	unsigned int dest_cpu;
 	struct dl_bw *dl_b;
 	bool overflow;
-	int cpus, ret;
-	unsigned long flags;
+	int ret;
 
 	dest_cpu = cpumask_any_and(cpu_active_mask, cs_cpus_allowed);
 
 	rcu_read_lock_sched();
 	dl_b = dl_bw_of(dest_cpu);
 	raw_spin_lock_irqsave(&dl_b->lock, flags);
-	cpus = dl_bw_cpus(dest_cpu);
-	overflow = __dl_overflow(dl_b, cpus, 0, p->dl.dl_bw);
+	cap = dl_bw_capacity(dest_cpu);
+	overflow = __dl_overflow(dl_b, cap, 0, p->dl.dl_bw);
 	if (overflow) {
 		ret = -EBUSY;
 	} else {
@@ -2794,6 +2797,8 @@ int dl_task_can_attach(struct task_struct *p, const struct cpumask *cs_cpus_allo
 		 * We will free resources in the source root_domain
 		 * later on (see set_cpus_allowed_dl()).
 		 */
+		int cpus = dl_bw_cpus(dest_cpu);
+
 		__dl_add(dl_b, p->dl.dl_bw, cpus);
 		ret = 0;
 	}
@@ -2826,16 +2831,15 @@ int dl_cpuset_cpumask_can_shrink(const struct cpumask *cur,
 
 bool dl_cpu_busy(unsigned int cpu)
 {
-	unsigned long flags;
+	unsigned long flags, cap;
 	struct dl_bw *dl_b;
 	bool overflow;
-	int cpus;
 
 	rcu_read_lock_sched();
 	dl_b = dl_bw_of(cpu);
 	raw_spin_lock_irqsave(&dl_b->lock, flags);
-	cpus = dl_bw_cpus(cpu);
-	overflow = __dl_overflow(dl_b, cpus, 0, 0);
+	cap = dl_bw_capacity(cpu);
+	overflow = __dl_overflow(dl_b, cap, 0, 0);
 	raw_spin_unlock_irqrestore(&dl_b->lock, flags);
 	rcu_read_unlock_sched();
 
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 8d5d06881294..91b250f265c0 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -310,11 +310,11 @@ void __dl_add(struct dl_bw *dl_b, u64 tsk_bw, int cpus)
 	__dl_update(dl_b, -((s32)tsk_bw / cpus));
 }
 
-static inline
-bool __dl_overflow(struct dl_bw *dl_b, int cpus, u64 old_bw, u64 new_bw)
+static inline bool __dl_overflow(struct dl_bw *dl_b, unsigned long cap,
+				 u64 old_bw, u64 new_bw)
 {
 	return dl_b->bw != -1 &&
-	       dl_b->bw * cpus < dl_b->total_bw - old_bw + new_bw;
+	       cap_scale(dl_b->bw, cap) < dl_b->total_bw - old_bw + new_bw;
 }
 
 extern void init_dl_bw(struct dl_bw *dl_b);
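
For context, the core of the change is the admission-control comparison in __dl_overflow(): instead of multiplying the per-CPU bandwidth limit by the number of CPUs in the root domain, it now scales the limit by the summed CPU capacity returned by dl_bw_capacity(), via cap_scale() ((v * s) >> SCHED_CAPACITY_SHIFT, with SCHED_CAPACITY_SCALE = 1024 for a full-capacity CPU). The standalone sketch below is not kernel code: the dl_bw stub, the helper names dl_overflow_cpus()/dl_overflow_cap(), and the numbers are illustrative assumptions, picked to show a case on an asymmetric (big.LITTLE-style) domain where the old CPU-count check admits more bandwidth than the hardware can supply while the capacity-aware check refuses it.

/*
 * Standalone illustration (userspace, not kernel code): contrast the old
 * CPU-count based admission test with the new capacity-aware one.  Only
 * cap_scale() and SCHED_CAPACITY_SHIFT mirror the kernel definitions; the
 * dl_bw stub, helper names and numbers below are made up for the example.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SCHED_CAPACITY_SHIFT	10
#define SCHED_CAPACITY_SCALE	(1UL << SCHED_CAPACITY_SHIFT)
#define cap_scale(v, s)		(((v) * (s)) >> SCHED_CAPACITY_SHIFT)

struct dl_bw {
	uint64_t bw;		/* per-CPU bandwidth limit, 2^20 scale */
	uint64_t total_bw;	/* bandwidth already admitted */
};

/* Old check: the limit scales with the number of CPUs in the root domain. */
static bool dl_overflow_cpus(const struct dl_bw *dl_b, int cpus,
			     uint64_t old_bw, uint64_t new_bw)
{
	return dl_b->bw != (uint64_t)-1 &&
	       dl_b->bw * cpus < dl_b->total_bw - old_bw + new_bw;
}

/* New check: the limit scales with the summed capacity of those CPUs. */
static bool dl_overflow_cap(const struct dl_bw *dl_b, unsigned long cap,
			    uint64_t old_bw, uint64_t new_bw)
{
	return dl_b->bw != (uint64_t)-1 &&
	       cap_scale(dl_b->bw, cap) < dl_b->total_bw - old_bw + new_bw;
}

int main(void)
{
	/* Default 95% limit, expressed at the kernel's 2^20 bandwidth scale. */
	struct dl_bw dl_b = { .bw = (95ULL << 20) / 100, .total_bw = 0 };
	uint64_t request = 2 * ((75ULL << 20) / 100);	/* two 75% tasks */

	/* Asymmetric domain: one full-capacity CPU plus one at ~40%. */
	int cpus = 2;
	unsigned long cap = SCHED_CAPACITY_SCALE + 410;	/* ~1.4 CPUs total */

	printf("count-based check rejects:    %d\n",
	       dl_overflow_cpus(&dl_b, cpus, 0, request));	/* prints 0 */
	printf("capacity-aware check rejects: %d\n",
	       dl_overflow_cap(&dl_b, cap, 0, request));	/* prints 1 */
	return 0;
}

With these example numbers the count-based check allows up to 0.95 * 2 = 1.9 CPUs of bandwidth and so admits 1.5 CPUs worth of tasks onto a domain that can only supply about 1.4, while the capacity-aware check (0.95 * ~1.4, roughly 1.33) refuses; this is the kind of over-admission on asymmetric-capacity systems the patch is meant to close.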
